import re
class CCY:
BYN = "BYN"
RUB = "RUB"
USD = "USD"
EUR = "EUR"
@classmethod
def from_string(cls, s):
if s is None:
return cls.BYN
ccys = [
            (r'r[ur][rb]?', cls.RUB),  # commas inside a character class match literal commas, so they are dropped
            (r'b[yr]?n?', cls.BYN),
(r'usd?', cls.USD),
(r'eur?', cls.EUR),
]
for ccy in ccys:
m = re.match(ccy[0], s, re.IGNORECASE)
if m is not None:
return ccy[1]
raise ValueError(f"Invalid currency string {s}, try rub, byn, usd, or eur")
class Tables:
SPENDINGS = "spendings"
MILEAGE = "mileage"
REMINDERS = "reminders"
class Categories:
GAS = "gas"
MILEAGE = "mileage"
CAR_GOODS = "car-goods"
REPAIR = "repair"
REMINDER_MILEAGE = "reminder-mileage"
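# A minimal usage sketch (added for illustration; not part of the original module),
# showing how CCY.from_string normalizes free-form currency input:
if __name__ == "__main__":
    assert CCY.from_string("rub") == CCY.RUB
    assert CCY.from_string("BY") == CCY.BYN
    assert CCY.from_string(None) == CCY.BYN  # None falls back to the BYN default
    assert CCY.from_string("eur") == CCY.EUR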
"""
================
Compute p-values
================
For the visualization, we used a comodulogram.
"""
from tensorpac import Pac
from tensorpac.signals import pac_signals_wavelet
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')  # on matplotlib >= 3.6 this style is named 'seaborn-v0_8-poster'
# First, we generate a dataset of signals artificially coupled between a 6hz
# phase and a 90hz amplitude. By default, this dataset is organized as (n_epochs, n_times) where
# n_times is the number of time points.
n_epochs = 1 # number of datasets
sf = 512. # sampling frequency
data, time = pac_signals_wavelet(f_pha=6, f_amp=90, noise=.8,
n_epochs=n_epochs, n_times=4000, sf=sf)
# Now, let's use the MVL with swap-amplitude surrogates (idpac=(1, 2, 0)) and no normalization :
p = Pac(idpac=(1, 2, 0), f_pha=(2, 15, 2, .2), f_amp=(60, 120, 10, 1))
xpac = p.filterfit(sf, data, n_perm=200, p=.05)
pval = p.pvalues
p.comodulogram(xpac.mean(-1), title=str(p), cmap='Spectral_r', vmin=0.,
pvalues=pval, levels=.05)
p.show()
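# Note (added): with levels=.05, regions of the comodulogram whose p-values
# fall below 0.05 should be outlined as statistically significant.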
import multiprocessing
def validate_chunks(n):
if n == 0:
        raise AssertionError('The number of chunks cannot be 0')
elif n <= -2:
raise AssertionError('The number of chunks should be -1 or > 0')
def get_num_partitions(given_partitions, n):
if given_partitions == -1:
return multiprocessing.cpu_count()
elif given_partitions > n:
return n
else:
return given_partitions
def get_num_cores():
return multiprocessing.cpu_count()
def wrap(obj):  # parameter renamed to avoid shadowing the built-in `object`
    return obj
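# A minimal usage sketch (added for illustration): -1 requests one partition
# per CPU core, and the partition count is otherwise capped at n.
if __name__ == "__main__":
    n_rows = 10
    validate_chunks(-1)
    print(get_num_partitions(-1, n_rows))  # multiprocessing.cpu_count()
    print(get_num_partitions(32, n_rows))  # capped at n_rows -> 10
    print(get_num_partitions(4, n_rows))   # 4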
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MedicationAdministrationSchema:
"""
Describes the event of a patient consuming or otherwise being administered a
medication. This may be as simple as swallowing a tablet or it may be a long
running infusion. Related resources tie this event to the authorizing
prescription, and the specific encounter between patient and health care
practitioner.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Describes the event of a patient consuming or otherwise being administered a
medication. This may be as simple as swallowing a tablet or it may be a long
running infusion. Related resources tie this event to the authorizing
prescription, and the specific encounter between patient and health care
practitioner.
resourceType: This is a MedicationAdministration resource
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content might not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content. Often,
this is a reference to an implementation guide that defines the special rules
along with other profiles etc.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource and can be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the resource and that modifies the understanding of the element
that contains it and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer is allowed to define an extension, there is a set of requirements
that SHALL be met as part of the definition of the extension. Applications
processing a resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
identifier: Identifiers associated with this Medication Administration that are defined by
business processes and/or used to refer to it when a direct URL reference to
the resource itself is not appropriate. They are business identifiers assigned
to this resource by the performer or other systems and remain constant as the
resource is updated and propagates from server to server.
instantiates: A protocol, guideline, orderset, or other definition that was adhered to in
whole or in part by this event.
partOf: A larger event of which this particular event is a component or step.
status: Will generally be set to show that the administration has been completed. For
some long running administrations such as infusions, it is possible for an
administration to be started but not completed or it may be paused while some
other process is under way.
statusReason: A code indicating why the administration was not performed.
category: Indicates where the medication is expected to be consumed or administered.
medicationCodeableConcept: Identifies the medication that was administered. This is either a link to a
resource representing the details of the medication or a simple attribute
carrying a code that identifies the medication from a known list of
medications.
medicationReference: Identifies the medication that was administered. This is either a link to a
resource representing the details of the medication or a simple attribute
carrying a code that identifies the medication from a known list of
medications.
subject: The person or animal or group receiving the medication.
context: The visit, admission, or other contact between patient and health care
provider during which the medication administration was performed.
supportingInformation: Additional information (for example, patient height and weight) that supports
the administration of the medication.
effectiveDateTime: A specific date/time or interval of time during which the administration took
place (or did not take place, when the 'notGiven' attribute is true). For many
administrations, such as swallowing a tablet the use of dateTime is more
appropriate.
effectivePeriod: A specific date/time or interval of time during which the administration took
place (or did not take place, when the 'notGiven' attribute is true). For many
administrations, such as swallowing a tablet the use of dateTime is more
appropriate.
performer: Indicates who or what performed the medication administration and how they
were involved.
reasonCode: A code indicating why the medication was given.
reasonReference: Condition or observation that supports why the medication was administered.
request: The original request, instruction or authority to perform the administration.
device: The device used in administering the medication to the patient. For example,
a particular infusion pump.
note: Extra information about the medication administration that is not conveyed by
the other attributes.
dosage: Describes the medication dosage information details e.g. dose, rate, site,
route, etc.
eventHistory: A summary of the events of interest that have occurred, such as when the
administration was verified.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.simple_types.id import idSchema
from spark_fhir_schemas.r4.complex_types.meta import MetaSchema
from spark_fhir_schemas.r4.simple_types.uri import uriSchema
from spark_fhir_schemas.r4.simple_types.code import codeSchema
from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
from spark_fhir_schemas.r4.complex_types.medicationadministration_performer import (
MedicationAdministration_PerformerSchema,
)
from spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema
from spark_fhir_schemas.r4.complex_types.medicationadministration_dosage import (
MedicationAdministration_DosageSchema,
)
if (
max_recursion_limit
and nesting_list.count("MedicationAdministration") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["MedicationAdministration"]
my_parent_path = (
parent_path + ".medicationadministration"
if parent_path
else "medicationadministration"
)
schema = StructType(
[
# This is a MedicationAdministration resource
StructField("resourceType", StringType(), True),
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField(
"id",
idSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".id",
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content might not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content. Often,
# this is a reference to an implementation guide that defines the special rules
# along with other profiles etc.
StructField(
"implicitRules",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".implicitrules",
),
True,
),
# The base language in which the resource is written.
StructField(
"language",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".language",
),
True,
),
# A human-readable narrative that contains a summary of the resource and can be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource and that modifies the understanding of the element
# that contains it and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer is allowed to define an extension, there is a set of requirements
# that SHALL be met as part of the definition of the extension. Applications
# processing a resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Identifiers associated with this Medication Administration that are defined by
# business processes and/or used to refer to it when a direct URL reference to
# the resource itself is not appropriate. They are business identifiers assigned
# to this resource by the performer or other systems and remain constant as the
# resource is updated and propagates from server to server.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A protocol, guideline, orderset, or other definition that was adhered to in
# whole or in part by this event.
StructField(
"instantiates",
ArrayType(
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A larger event of which this particular event is a component or step.
StructField(
"partOf",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Will generally be set to show that the administration has been completed. For
# some long running administrations such as infusions, it is possible for an
# administration to be started but not completed or it may be paused while some
# other process is under way.
StructField(
"status",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".status",
),
True,
),
# A code indicating why the administration was not performed.
StructField(
"statusReason",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Indicates where the medication is expected to be consumed or administered.
StructField(
"category",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Identifies the medication that was administered. This is either a link to a
# resource representing the details of the medication or a simple attribute
# carrying a code that identifies the medication from a known list of
# medications.
StructField(
"medicationCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Identifies the medication that was administered. This is either a link to a
# resource representing the details of the medication or a simple attribute
# carrying a code that identifies the medication from a known list of
# medications.
StructField(
"medicationReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The person or animal or group receiving the medication.
StructField(
"subject",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The visit, admission, or other contact between patient and health care
# provider during which the medication administration was performed.
StructField(
"context",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Additional information (for example, patient height and weight) that supports
# the administration of the medication.
StructField(
"supportingInformation",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A specific date/time or interval of time during which the administration took
# place (or did not take place, when the 'notGiven' attribute is true). For many
# administrations, such as swallowing a tablet the use of dateTime is more
# appropriate.
StructField("effectiveDateTime", TimestampType(), True),
# A specific date/time or interval of time during which the administration took
# place (or did not take place, when the 'notGiven' attribute is true). For many
# administrations, such as swallowing a tablet the use of dateTime is more
# appropriate.
StructField(
"effectivePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indicates who or what performed the medication administration and how they
# were involved.
StructField(
"performer",
ArrayType(
MedicationAdministration_PerformerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# A code indicating why the medication was given.
StructField(
"reasonCode",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Condition or observation that supports why the medication was administered.
StructField(
"reasonReference",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# The original request, instruction or authority to perform the administration.
StructField(
"request",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The device used in administering the medication to the patient. For example,
# a particular infusion pump.
StructField(
"device",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Extra information about the medication administration that is not conveyed by
# the other attributes.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Describes the medication dosage information details e.g. dose, rate, site,
# route, etc.
StructField(
"dosage",
MedicationAdministration_DosageSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A summary of the events of interest that have occurred, such as when the
# administration was verified.
StructField(
"eventHistory",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
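# A minimal usage sketch (added; the file path is hypothetical): attach the
# generated schema when reading MedicationAdministration resources with Spark.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    schema = MedicationAdministrationSchema.get_schema()
    df = spark.read.schema(schema).json("medication_administration.json")
    df.printSchema()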
from setuptools import setup, find_packages
setup(
name='pyesapi',
version='0.2.1',
description='Python interface to Eclipse Scripting API',
author='Michael Folkerts, Varian Medical Systems',
author_email='Michael.Folkerts@varian.com',
license='MIT',
packages=find_packages(),
install_requires=[
'numpy',
'scipy',
'pythonnet==2.3.0', # tested to work with python 3.6
],
)
from common import *
import collections
try:
    import cupy
except ImportError:  # cupy is optional; the CUDA tests below are skipped without it
    cupy = None
# From http://pythonhosted.org/pythran/MANUAL.html
def arc_distance(theta_1, phi_1, theta_2, phi_2):
"""
Calculates the pairwise arc distance
between all points in vector a and b.
"""
temp = (np.sin((theta_2-theta_1)/2)**2
+ np.cos(theta_1)*np.cos(theta_2) * np.sin((phi_2-phi_1)/2)**2)
distance_matrix = 2 * np.arctan2(np.sqrt(temp), np.sqrt(1-temp))
return distance_matrix
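# A small sanity check (added; not part of the original suite, and assuming
# `np` is provided by `common`): with the haversine formula above, two
# antipodal points on a great circle are pi apart.
def test_arc_distance_antipodal():
    zeros = np.array([0.0])
    result = arc_distance(zeros, zeros, zeros, np.array([np.pi]))
    np.testing.assert_allclose(result, np.pi)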
def test_numba(ds):
ds_original = ds.copy()
#ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now
expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y)
ds['arc_distance'] = expr
#assert ds.arc_distance.expression == expr.expression
ds['arc_distance_jit'] = ds['arc_distance'].jit_numba()
np.testing.assert_array_almost_equal(ds.arc_distance.tolist(), ds.arc_distance_jit.tolist())
# TODO: make it such that they can be pickled
ds_original.state_set(ds.state_get())
ds = ds_original
np.testing.assert_array_almost_equal(ds.arc_distance.tolist(), ds.arc_distance_jit.tolist())
@pytest.mark.skipif(sys.version_info < (3,6) and sys.version_info[0] != 2,
reason="no support for python3.5 (numba segfaults)")
def test_jit_overwrite(ds_local):
ds = ds_local # TODO: remote overwriting of functions does not work
ds_original = ds.copy()
expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y)
ds['arc_distance'] = expr
ds['arc_distance_jit'] = ds['arc_distance'].jit_numba()
ds['arc_distance_jit'] = ds['arc_distance * 2'].jit_numba()
np.testing.assert_array_almost_equal((ds.arc_distance*2).tolist(), ds.arc_distance_jit.tolist())
@pytest.mark.skipif(cupy is None,
reason="cuda support relies on cupy")
def test_cuda(ds_local):
ds = ds_local
ds_original = ds.copy()
#ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now
expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y)
ds['arc_distance'] = expr
print(expr)
#assert ds.arc_distance.expression == expr.expression
ds['arc_distance_jit'] = ds['arc_distance'].jit_cuda()
np.testing.assert_almost_equal(ds.arc_distance.values, ds.arc_distance_jit.values)
# TODO: make it such that they can be pickled
ds_original.state_set(ds.state_get())
ds = ds_original
np.testing.assert_almost_equal(ds.arc_distance.values, ds.arc_distance_jit.values)
def test_metal(df_local):
pytest.importorskip("Metal")
df = df_local
df_original = df.copy()
#df.columns['x'] = (df.columns['x']*1).copy() # convert non non-big endian for now
expr = arc_distance(df.y*1, df.y*1, df.y**2*df.y, df.x+df.y)
# expr = df.x + df.y
df['arc_distance'] = expr
#assert df.arc_distance.expression == expr.expression
df['arc_distance_jit'] = df['arc_distance'].jit_metal()
# assert df.arc_distance.tolist() == df.arc_distance_jit.tolist()
np.testing.assert_almost_equal(df.arc_distance.values, df.arc_distance_jit.values, decimal=1)
# TODO: make it such that they can be pickled
df_original.state_set(df.state_get())
df = df_original
np.testing.assert_almost_equal(df.arc_distance.values, df.arc_distance_jit.values, decimal=1)
@pytest.mark.parametrize("type_name", vaex.array_types._type_names)
def test_types_metal(type_name, df_factory_numpy):
pytest.importorskip("Metal")
df = df_factory_numpy(x=np.array([0, 1, 2], dtype=type_name), y=[2, 3, 4])
# df = df_factory_numpy(x=np.array([0, 1, 2], dtype=type_name), y=np.array([2, 3, 4], dtype=type_name))
# df['x'] = df['x'].astype(type_name)
df['z'] = (df['x'] + df['y']).jit_metal()
assert df['z'].tolist() == [2, 4, 6]
from unittest import mock
import pytest
from nesta.packages.geographies.uk_geography_lookup import get_gss_codes
from nesta.packages.geographies.uk_geography_lookup import get_children
from nesta.packages.geographies.uk_geography_lookup import _get_children
SPARQL_QUERY = '''
PREFIX entity: <http://statistics.data.gov.uk/def/statistical-entity#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?area_code
WHERE {
?area_entity entity:code ?area_code_entity;
rdfs:label ?area_code .
?area_code_entity rdfs:label ?area_code_type;
FILTER(SUBSTR(?area_code_type, 2, 2) > "01").
}
'''
@pytest.fixture
def pars_for_get_children():
return dict(base="dummy", geocodes="dummy", max_attempts=3)
@pytest.fixture
def side_effect_for_get_children():
return ([1, 2], [2, 3], ["A", 3], ["5", 4], [])
@mock.patch("nesta.packages.geographies.uk_geography_lookup.find_filepath_from_pathstub", return_value=None)
@mock.patch("builtins.open", new_callable=mock.mock_open, read_data=SPARQL_QUERY)
def test_get_gss_codes(mocked_open, mocked_find_filepath_from_pathstub):
codes = get_gss_codes(test=True)
assert len(codes) > 100
# def test_get_children():
# x = _get_children("E04", "E08000001")
# assert len(x) > 0
# @mock.patch("nesta.packages.geographies.uk_geography_lookup._get_children")
# def test_get_children_max_out(mocked, pars_for_get_children):
# mocked.side_effect = ([], [], [], [], [])
# get_children(**pars_for_get_children)
# assert mocked.call_count == pars_for_get_children["max_attempts"]
# @mock.patch("nesta.packages.geographies.uk_geography_lookup._get_children")
# def test_get_children_totals(mocked, pars_for_get_children, side_effect_for_get_children):
# mocked.side_effect = side_effect_for_get_children
# children = get_children(**pars_for_get_children)
# assert len(children) == sum(len(x) for x in side_effect_for_get_children)
import unittest
import hcl2
from checkov.terraform.checks.resource.gcp.GoogleCloudSqlServerContainedDBAuthentication import check
from checkov.common.models.enums import CheckResult
class TestCloudSQLServerContainedDBAuthentication(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" {
database_version = "SQLSERVER_2017_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
settings {
activation_policy = "ALWAYS"
availability_type = "ZONAL"
backup_configuration {
binary_log_enabled = "false"
enabled = "true"
location = "us"
point_in_time_recovery_enabled = "false"
start_time = "00:00"
}
crash_safe_replication = "false"
                        database_flags = [{
name = "cross db ownership chaining"
value = "on"
}, {
name = "contained database authentication"
value = "on"
}]
disk_autoresize = "true"
disk_size = "20"
disk_type = "PD_SSD"
ip_configuration {
ipv4_enabled = "false"
private_network = "projects/gcp-bridgecrew-deployment/global/networks/default"
require_ssl = "false"
}
location_preference {
zone = "us-central1-a"
}
maintenance_window {
day = "0"
hour = "0"
}
pricing_plan = "PER_USE"
replication_type = "SYNCHRONOUS"
tier = "db-custom-1-4096"
}
}
""")
resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" {
database_version = "SQLSERVER_2017_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
settings {
activation_policy = "ALWAYS"
availability_type = "ZONAL"
backup_configuration {
binary_log_enabled = "false"
enabled = "true"
location = "us"
point_in_time_recovery_enabled = "false"
start_time = "00:00"
}
crash_safe_replication = "false"
database_flags {
name = "cross db ownership chaining"
value = "off"
}
database_flags {
name = "contained database authentication"
value = "off"
}
disk_autoresize = "true"
disk_size = "20"
disk_type = "PD_SSD"
ip_configuration {
ipv4_enabled = "false"
private_network = "projects/gcp-bridgecrew-deployment/global/networks/default"
require_ssl = "false"
}
location_preference {
zone = "us-central1-a"
}
maintenance_window {
day = "0"
hour = "0"
}
pricing_plan = "PER_USE"
replication_type = "SYNCHRONOUS"
tier = "db-custom-1-4096"
}
}
""")
resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_2(self):
hcl_res = hcl2.loads("""
resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" {
database_version = "SQLSERVER_2017_STANDARD122"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
settings {
activation_policy = "ALWAYS"
availability_type = "ZONAL"
backup_configuration {
binary_log_enabled = "false"
enabled = "true"
location = "us"
point_in_time_recovery_enabled = "false"
start_time = "00:00"
}
crash_safe_replication = "false"
database_flags {
name = "cross db ownership chaining"
value = "on"
}
database_flags {
name = "contained database authentication"
value = "off"
}
disk_autoresize = "true"
disk_size = "20"
disk_type = "PD_SSD"
ip_configuration {
ipv4_enabled = "false"
private_network = "projects/gcp-bridgecrew-deployment/global/networks/default"
require_ssl = "false"
}
location_preference {
zone = "us-central1-a"
}
maintenance_window {
day = "0"
hour = "0"
}
pricing_plan = "PER_USE"
replication_type = "SYNCHRONOUS"
tier = "db-custom-1-4096"
}
}
""")
resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_3(self):
hcl_res = hcl2.loads("""
resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" {
database_version = "SQLSERVER_2017_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
settings {
activation_policy = "ALWAYS"
availability_type = "ZONAL"
backup_configuration {
binary_log_enabled = "false"
enabled = "true"
location = "us"
point_in_time_recovery_enabled = "false"
start_time = "00:00"
}
crash_safe_replication = "false"
disk_autoresize = "true"
disk_size = "20"
disk_type = "PD_SSD"
ip_configuration {
ipv4_enabled = "false"
private_network = "projects/gcp-bridgecrew-deployment/global/networks/default"
require_ssl = "false"
}
location_preference {
zone = "us-central1-a"
}
maintenance_window {
day = "0"
hour = "0"
}
pricing_plan = "PER_USE"
replication_type = "SYNCHRONOUS"
tier = "db-custom-1-4096"
}
}
""")
resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_4(self):
hcl_res = hcl2.loads("""
resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" {
database_version = "SQLSERVER_2017_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
}
""")
resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
import numpy as np
import pandas as pd
import pytest
from scipy import stats
from locan import LocData
from locan.analysis import BlinkStatistics
from locan.analysis.blinking import _blink_statistics, _DistributionFits
def test__blink_statistics_0():
    # frames with on- and off-periods up to three frames, starting with a one-frame on-period.
frames = np.array([0, 4, 6, 7, 8, 12, 13])
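    # Worked through by hand (added note): the on-periods are the runs [0],
    # [4], [6, 7, 8], [12, 13] (lengths 1, 1, 3, 2), separated by gaps of 3,
    # 1, and 3 missing frames, which the assertions below check.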
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 1, 3, 2])
assert np.array_equal(results["off_periods"], [3, 1, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 6, 12])
assert np.array_equal(results["off_periods_frame"], [1, 5, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1], [2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 5, 2])
assert np.array_equal(results["off_periods"], [3, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 12])
assert np.array_equal(results["off_periods_frame"], [1, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1, 2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6]])
]
)
def test__blink_statistics_1():
    # frames with on- and off-periods up to three frames, starting with a two-frame on-period.
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [1, 2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 3, 6, 12])
assert np.array_equal(results["off_periods_frame"], [2, 4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [4, 3, 2])
assert np.array_equal(results["off_periods"], [2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 6, 12])
assert np.array_equal(results["off_periods_frame"], [4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]
)
]
)
def test__blink_statistics_2():
    # the same frames shifted by +1, so the trace now starts with a one-frame off-period.
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) + 1
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [1, 1, 2, 3])
assert np.array_equal(results["on_periods_frame"], [1, 4, 7, 13])
assert np.array_equal(results["off_periods_frame"], [0, 3, 5, 10])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [5, 3, 2])
assert np.array_equal(results["off_periods"], [2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 7, 13])
assert np.array_equal(results["off_periods_frame"], [5, 10])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [15])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]
)
]
)
def test__blink_statistics_3():
    # frames with on- and off-periods up to three frames, preceded by a four-frame off-period.
frames = np.array([0, 1, 4, 6, 7, 8, 12, 13]) + 4
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [4, 2, 1, 3])
assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16])
assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [2, 1, 3])
assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16])
assert np.array_equal(results["off_periods_frame"], [6, 9, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=2, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [4, 3])
assert np.array_equal(results["on_periods_frame"], [4, 16])
assert np.array_equal(results["off_periods_frame"], [0, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=2, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [3])
assert np.array_equal(results["on_periods_frame"], [4, 16])
assert np.array_equal(results["off_periods_frame"], [13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [18])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert np.array_equal(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]])
def test__blink_statistics_4():
    # frames with on-periods up to two frames and off-periods up to five frames, preceded by a two-frame off-period.
frames = np.array([0, 1, 4, 6, 12, 13]) + 2
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 1, 2])
assert np.array_equal(results["off_periods"], [2, 2, 1, 5])
assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14])
assert np.array_equal(results["off_periods_frame"], [0, 4, 7, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]]
)
]
)
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 1, 2])
assert np.array_equal(results["off_periods"], [2, 1, 5])
assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14])
assert np.array_equal(results["off_periods_frame"], [4, 7, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]]
)
]
)
results = _blink_statistics(frames, memory=3, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [5])
assert np.array_equal(results["on_periods_frame"], [0, 14])
assert np.array_equal(results["off_periods_frame"], [9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]])
]
)
results = _blink_statistics(frames, memory=3, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [7, 2])
assert np.array_equal(results["off_periods"], [5])
assert np.array_equal(results["on_periods_frame"], [2, 14])
assert np.array_equal(results["off_periods_frame"], [9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]])
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [16])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5]])
]
)
def test__blink_statistics_5(caplog):
    # frames with on- and off-periods, including duplicated frames (4 and 12 each appear twice).
frames = np.array([0, 1, 4, 4, 6, 7, 8, 12, 12, 13]) + 4
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [4, 2, 1, 3])
assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16])
assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
assert caplog.record_tuples == [
(
"locan.analysis.blinking",
30,
"There are 2 duplicated frames found that will be ignored.",
)
]
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [18])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]
)
]
)
def test__blink_statistics_6():
    # frames with on-periods up to two frames and off-periods up to five frames, starting with a one-frame on-period.
frames = np.array([0, 2, 3, 9])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 2, 1])
assert np.array_equal(results["off_periods"], [1, 5])
assert np.array_equal(results["on_periods_frame"], [0, 2, 9])
assert np.array_equal(results["off_periods_frame"], [1, 4])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0], [1, 2], [3]])
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [10])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3]])
]
)
@pytest.fixture()
def locdata_simple():
locdata_dict = {
"position_x": [0, 0, 1, 4, 5],
"position_y": [0, 1, 3, 4, 1],
"intensity": [0, 1, 3, 4, 1],
"psf_sigma_x": [100, 100, 100, 100, 100],
}
return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict))
@pytest.fixture()
def locdata_with_zero_frame():
locdata_dict = {"frame": [0, 1, 2, 4, 10, 11, 14]}
return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict))
@pytest.fixture()
def locdata_without_zero_frame():
locdata_dict = {"frame": [1, 2, 4, 10, 11, 14]}
return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict))
@pytest.fixture()
def locdata_with_repetitions():
locdata_dict = {"frame": [2, 2, 2, 4, 4, 14]}
return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict))
def test_blink_statistics(locdata_with_zero_frame, locdata_without_zero_frame):
bs = _blink_statistics(
locdata_with_zero_frame, memory=0, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [3, 1, 2, 1])
assert all(bs["off_periods"] == [1, 5, 2])
bs = _blink_statistics(
locdata_with_zero_frame.data.frame.values,
memory=0,
remove_heading_off_periods=False,
)
assert all(bs["on_periods"] == [3, 1, 2, 1])
assert all(bs["off_periods"] == [1, 5, 2])
bs = _blink_statistics(
locdata_without_zero_frame, memory=0, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [2, 1, 2, 1])
assert all(bs["off_periods"] == [1, 1, 5, 2])
bs = _blink_statistics(
locdata_with_zero_frame, memory=0, remove_heading_off_periods=True
)
assert all(bs["on_periods"] == [3, 1, 2, 1])
assert all(bs["off_periods"] == [1, 5, 2])
bs = _blink_statistics(
locdata_without_zero_frame, memory=0, remove_heading_off_periods=True
)
assert all(bs["on_periods"] == [2, 1, 2, 1])
assert all(bs["off_periods"] == [1, 5, 2])
bs = _blink_statistics(
locdata_with_zero_frame, memory=1, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [5, 2, 1])
assert all(bs["off_periods"] == [5, 2])
bs = _blink_statistics(
locdata_without_zero_frame, memory=1, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [5, 2, 1])
assert all(bs["off_periods"] == [5, 2])
bs = _blink_statistics(
locdata_with_zero_frame, memory=2, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [5, 5])
assert all(bs["off_periods"] == [5])
bs = _blink_statistics(
locdata_without_zero_frame, memory=2, remove_heading_off_periods=False
)
assert all(bs["on_periods"] == [5, 5])
assert all(bs["off_periods"] == [5])
def test_blink_statistics__with_repetitions(locdata_with_repetitions):
_blink_statistics(
locdata_with_repetitions, memory=0, remove_heading_off_periods=False
)
def test_BlinkStatistics_empty(caplog):
bs = BlinkStatistics().compute(LocData())
bs.fit_distributions()
bs.hist()
assert caplog.record_tuples == [
("locan.analysis.blinking", 30, "Locdata is empty."),
("locan.analysis.blinking", 30, "No results available to fit."),
]
def test_BlinkStatistics(locdata_with_zero_frame):
bs = BlinkStatistics().compute(locdata_with_zero_frame)
assert repr(bs) == "BlinkStatistics(memory=0, remove_heading_off_periods=True)"
assert all(bs.results["on_periods"] == [3, 1, 2, 1])
assert all(bs.results["off_periods"] == [1, 5, 2])
assert bs.distribution_statistics == {}
bs.hist(data_identifier="on_periods", ax=None, bins="auto", log=True, fit=False)
bs.hist(data_identifier="off_periods", ax=None, bins="auto", log=True, fit=False)
bs.hist(data_identifier="on_periods", ax=None, bins="auto", log=True, fit=True)
def test_DistributionFits(locdata_with_zero_frame):
bs = BlinkStatistics().compute(locdata_with_zero_frame)
df = _DistributionFits(bs, distribution=stats.expon, data_identifier="on_periods")
# print(df.analysis_class.results)
assert len(df.analysis_class.results) == 5
assert df.data_identifier == "on_periods"
assert (
repr(df) == "_DistributionFits(analysis_class=BlinkStatistics, "
"distribution=expon_gen, data_identifier=on_periods)"
)
assert df.parameter_dict() == {}
df.fit()
assert list(df.parameter_dict().keys()) == ["on_periods_loc", "on_periods_scale"]
df = _DistributionFits(bs, distribution=stats.expon, data_identifier="off_periods")
df.fit()
assert list(df.parameter_dict().keys()) == ["off_periods_loc", "off_periods_scale"]
df.plot()
# print(df.analysis_class.results[df.data_identifier])
def test_fit_distributions(locdata_with_zero_frame):
bs = BlinkStatistics().compute(locdata_with_zero_frame)
bs.fit_distributions()
assert bs.distribution_statistics["on_periods"].parameter_dict() == {
"on_periods_loc": 1.0,
"on_periods_scale": 0.75,
}
assert bs.distribution_statistics["off_periods"].parameter_dict() == {
"off_periods_loc": 1.0,
"off_periods_scale": 1.6666666666666665,
}
bs.hist()
bs.hist(data_identifier="off_periods")
del bs
bs = BlinkStatistics().compute(locdata_with_zero_frame)
bs.fit_distributions(with_constraints=False)
assert (
bs.distribution_statistics["on_periods"].parameter_dict()["on_periods_loc"] == 1
)
assert (
bs.distribution_statistics["off_periods"].parameter_dict()["off_periods_loc"]
== 1
)
del bs
bs = BlinkStatistics().compute(locdata_with_zero_frame)
bs.fit_distributions(data_identifier="on_periods")
assert bs.distribution_statistics["on_periods"].parameter_dict() == {
"on_periods_loc": 1.0,
"on_periods_scale": 0.75,
}
|
nilq/baby-python
|
python
|
import unittest
import sys
from math import pi
sys.path.insert(0, "..")
from sections.sections import Wedge
import test_sections_generic as generic
class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sectclass = Wedge
cls.dimensions = dict(r=3.0, phi=pi)
cls.angular = ["phi"]
cls.rp = 5.0, 4.0
cls._cog = 1.2732395447351625, 0.0
cls.A = 14.137166941154069
cls._I0 = 31.808625617596654, 8.890313812363729, 0.0
cls._I = 31.808625617596654, 31.808625617596654, 0.0
def test_check_dimensions(self):
self.assertRaises(ValueError, self.section.set_dimensions, r=-1)
self.assertRaises(ValueError, self.section.set_dimensions, r=0)
self.assertRaises(ValueError, self.section.set_dimensions, phi=-1)
self.assertRaises(ValueError, self.section.set_dimensions, phi=0)
self.assertRaises(ValueError, self.section.set_dimensions, phi=2.1*pi)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Usage: raw_harness.py argc rvm_path rvm_cmd... use_system_time repTimes sourceFile [arguments]
# finally, will append a full function file
'''
original R file
#if has input, gen
args=c(args, argd, ...)
dataset = setup
'''
import sys,os
raw_harness_str = '''
rnorm <- runif
if(exists('setup')) {
if(length(bench_args) == 0) {
bench_args <- setup()
TRUE
} else {
bench_args <- setup(bench_args)
FALSE
}
}
if(length(bench_args) == 0) {
for(bench_i in 1:bench_reps) { run() }
} else {
for(bench_i in 1:bench_reps) { run(bench_args) }
}
'''
if __name__ == "__main__":
argv = sys.argv
    argc = int(argv[1]) # number of fixed arguments that form the R VM command
rvm_path = argv[2]
rvm_cmd = argv[3:(argc+1)] #with all args
use_system_time = argv[argc+1]
if(use_system_time == 'TRUE'):
        print('[rbench] Cannot use system.time() for these experimental R VMs. Falling back to meter=time.')
rep = argv[argc+2]
print(rep)
src = argv[argc+3] #the file
print(src)
    # construct the file's full path
src = os.path.join(os.getcwd(), src)
#now generate the source file
    # use the benchmark file to generate a temporary harness source file
src_dir = os.path.dirname(src)
src_basename = os.path.basename(src)
tmpsrc = os.path.join(src_dir, 'rbench_'+src_basename)
#then decide whether there are additional args
if(len(argv) > argc+4):
bench_args = argv[argc+4:]
bench_args_str = "bench_args <- c('" + "','".join(bench_args)+ "')\n"
else:
bench_args_str = "bench_args <- character(0)\n"
bench_reps_str = 'bench_reps <- ' + rep +'\n'
# now generate the file
with open(tmpsrc, 'w') as f:
f.write('harness_argc<-1\n')
f.write(bench_args_str)
f.write(bench_reps_str)
with open(src, 'r') as srcf:
f.write(srcf.read())
        f.write(raw_harness_str)
#now start running
    # need to change to the R VM directory
os.chdir(rvm_path)
rvm_cmd.append(tmpsrc)
exit_code = os.system(' '.join(rvm_cmd))
os.remove(tmpsrc)
sys.exit(exit_code)
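# Example invocation (hypothetical paths; argument layout matches the parsing
# above, with argc=4 so that argv[3:5] forms the R VM command):
#   python3 raw_harness.py 4 /opt/rvm ./R --vanilla FALSE 5 bench/mandel.R 400
# i.e. use_system_time=FALSE, 5 repetitions, source bench/mandel.R, one bench arg.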
|
nilq/baby-python
|
python
|
# Import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
import scipy
# from scipy.sparse.construct import random
import scipy.stats
from scipy.stats import arcsine
from scipy.interpolate import interp1d
from astropy.io import fits
import astropy.units as u
# WebbPSF
import webbpsf
from webbpsf.opds import OTE_Linear_Model_WSS
from webbpsf.utils import get_webbpsf_data_path
# Logging
from . import conf
from .logging_utils import setup_logging
import logging
_log = logging.getLogger('webbpsf_ext')
# Progress bar
from tqdm.auto import trange, tqdm
__epsilon = np.finfo(float).eps
def OPDFile_to_HDUList(file, slice=0):
"""
    Helper function to make a picklable HDUList for ingesting into
    multiprocessor WebbPSF calculations.
"""
try:
hdul = fits.open(file)
except FileNotFoundError:
# Check WebbPSF instrument OPD directory
if 'NIRCam' in file:
inst = 'NIRCam'
elif 'MIRI' in file:
inst = 'MIRI'
elif 'NIRSpec' in file:
inst = 'NIRSpec'
elif 'NIRISS' in file:
inst = 'NIRISS'
elif 'FGS' in file:
inst = 'FGS'
        else:
            raise ValueError(f"Cannot infer instrument from file name: {file}")
        opd_dir = os.path.join(get_webbpsf_data_path(),inst,'OPD')
hdul = fits.open(os.path.join(opd_dir, file))
ndim = len(hdul[0].data.shape)
if ndim==3:
opd_im = hdul[0].data[slice,:,:]
else:
opd_im = hdul[0].data
hdu_new = fits.PrimaryHDU(opd_im)
hdu_new.header = hdul[0].header.copy()
opd_hdul = fits.HDUList([hdu_new])
hdul.close()
return opd_hdul
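# Minimal usage sketch (hypothetical file name; OPD files are looked up in the
# WebbPSF data path if not found locally):
#   >>> opd_hdul = OPDFile_to_HDUList('OPD_RevW_ote_for_NIRCam_predicted.fits', slice=0)
#   >>> opd_hdul[0].data.shape   # a single 2D OPD image extracted from the cube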
class OTE_WFE_Drift_Model(OTE_Linear_Model_WSS):
"""
OPD subclass for calculating OPD drift values over time.
"""
def __init__(self, **kwargs):
"""
Parameters
----------
opdfile : str or fits.HDUList
FITS file to load an OPD from. The OPD must be specified in microns.
opd_index : int, optional
FITS extension to load OPD from
transmission : str or None
FITS file for pupil mask, with throughput from 0-1.
If not explicitly provided, will be inferred from
wherever is nonzero in the OPD file.
slice : int, optional
Slice of a datacube to load OPD from, if the selected
extension contains a datacube.
segment_mask_file : str
FITS file for pupil mask, with throughput from 0-1. If not
explicitly provided, will use JWpupil_segments.fits
zero : bool
Set an OPD to precisely zero everywhere.
rm_ptt : bool
Remove piston, tip, and tilt? This is mostly for visualizing
the higher order parts of the LOM. Default: False.
"""
# Initialize OTE_Linear_Model_WSS
OTE_Linear_Model_WSS.__init__(self, **kwargs)
# Initialize delta OPD normalized images
self.dopd_thermal = None
self.dopd_frill = None
self.dopd_iec = None
# Initialize normalized delta OPD images
log_prev = conf.logging_level
if 'WARN' not in log_prev:
setup_logging('WARN', verbose=False)
self._calc_delta_opds()
if 'WARN' not in log_prev:
setup_logging(log_prev, verbose=False)
def reset(self, verbose=True):
""" Reset an OPD to the state it was loaded from disk.
i.e. undo all segment moves.
"""
self._frill_wfe_amplitude = 0
self._iec_wfe_amplitude = 0
self.opd = self._opd_original.copy()
self.segment_state *= 0
if verbose:
_log.info("Reset to unperturbed OPD")
def _calc_delta_opds(self, thermal=True, frill=True, iec=True):
"""
Calculate delta OPDs for the three components and save to
class properties. Each delta OPD image will be normalized
such that the nm RMS WFE is equal to 1.
"""
# Set everything to initial state
self.reset(verbose=False)
# Calculate thermal dOPD
if thermal:
self.thermal_slew(1*u.day)
# self.opd has now been updated to drifted OPD
# Calculate delta OPD and save into self.opd attribute
# This is because self.rms() uses the image in self.opd
self.opd -= self._opd_original
# scale by RMS of delta OPD, and save
self.dopd_thermal = self.opd / self.rms()
# Calculate frill dOPD
if frill:
# Explicitly set thermal component to 0
self.thermal_slew(0*u.min, scaling=0, delay_update=True)
self.apply_frill_drift(amplitude=1)
# self.opd has now been updated to drifted OPD
# Temporarily calculate delta and calc rms
self.opd -= self._opd_original
# scale by RMS of delta OPD, and save
self.dopd_frill = self.opd / self.rms()
# Calculate IEC dOPD
if iec:
# Explicitly set thermal and frill components to 0
self.thermal_slew(0*u.min, scaling=0, delay_update=True)
self.apply_frill_drift(amplitude=0, delay_update=True)
self.apply_iec_drift(amplitude=1)
# self.opd has now been updated to drifted OPD
# Temporarily calculate delta and calc rms
self.opd -= self._opd_original
# scale by RMS of delta OPD, and save
self.dopd_iec = self.opd / self.rms()
# Back to initial state
self.reset(verbose=False)
def calc_rms(self, arr, segname=None):
"""Calculate RMS of input images"""
# RMS for a single image
def rms_im(im):
""" Find RMS of an image by excluding pixels with 0s, NaNs, or Infs"""
ind = (im != 0) & (np.isfinite(im))
res = 0 if len(im[ind]) == 0 else np.sqrt(np.mean(im[ind] ** 2))
res = 0 if np.isnan(res) else res
return res
# Reshape into a 3-dimension cube for consistency
if len(arr.shape) == 3:
nz,ny,nx = arr.shape
else:
ny,nx = arr.shape
nz = 1
arr = arr.reshape([nz,ny,nx])
if segname is None:
# RMS of whole aperture
rms = np.asarray([rms_im(im) for im in arr])
else:
# RMS of specified segment
assert (segname in self.segnames)
iseg = np.where(self.segnames == segname)[0][0] + 1 # segment index from 1 - 18
seg_mask = self._segment_masks == iseg
arr_seg = arr[:,seg_mask]
rms = np.asarray([rms_im(im) for im in arr_seg])
# If single image, remove first dimension
if nz==1:
rms = rms[0]
return rms
def slew_scaling(self, start_angle, end_angle):
""" WFE scaling due to slew angle
Scale the WSS Hexike components based on slew pitch angles.
Parameters
----------
start_angle : float
The starting sun pitch angle, in degrees between -5 and +45
end_angle : float
The ending sun pitch angle, in degrees between -5 and +45
"""
num = np.sin(np.radians(end_angle)) - np.sin(np.radians(start_angle))
den = np.sin(np.radians(45.)) - np.sin(np.radians(-5.))
return num / den
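    # Worked example (assuming `ote` is an instance of this class; values follow
    # directly from the formula above): a full slew from -5 to +45 deg gives the
    # maximum scaling of 1.0, while start == end gives 0.0.
    #   >>> ote.slew_scaling(-5, 45)
    #   1.0
    #   >>> ote.slew_scaling(20, 20)
    #   0.0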
def gen_frill_drift(self, delta_time, start_angle=-5, end_angle=45, case='BOL'):
""" Frill WFE drift scaling
Function to determine the factor to scale the delta OPD associated with
frill tensioning. Returns the RMS WFE (nm) depending on time and slew
angles.
Parameters
----------
delta_time : astropy.units quantity object
The time since a slew occurred.
start_angle : float
The starting sun pitch angle, in degrees between -5 and +45
end_angle : float
The ending sun pitch angle, in degrees between -5 and +45
case : string
either "BOL" for current best estimate at beginning of life, or
"EOL" for more conservative prediction at end of life. The amplitude
of the frill drift is roughly 2x lower for BOL (8.6 nm after 2 days)
versus EOL (18.4 nm after 2 days).
"""
frill_hours = np.array([
0.00, 0.55, 1.00, 1.60, 2.23, 2.85, 3.47, 4.09,
4.71, 5.33, 5.94, 6.56, 7.78, 9.00, 9.60, 11.41,
12.92, 15.02, 18.00, 21.57, 23.94, 26.90, 32.22,
35.76, 41.07, 45.20, 50.50, 100.58
])
# Normalized frill drift amplitude
frill_wfe_drift_norm = np.array([
0.000, 0.069, 0.120, 0.176, 0.232, 0.277,
0.320, 0.362, 0.404, 0.444, 0.480, 0.514,
0.570, 0.623, 0.648, 0.709, 0.758, 0.807,
0.862, 0.906, 0.930, 0.948, 0.972, 0.981,
0.991, 0.995, 0.998, 1.000
])
# Create interpolation function
finterp = interp1d(frill_hours, frill_wfe_drift_norm,
kind='cubic', fill_value=(0, 1), bounds_error=False)
# Convert input time to hours and get normalized amplitude
time_hour = delta_time.to(u.hour).value
amp_norm = finterp(time_hour)
# Scale height from either EOL or BOL (nm RMS)
# Assuming slew angles from -5 to +45 deg
if case=='EOL':
wfe_drift_rms = 18.4 * amp_norm
elif case=='BOL':
wfe_drift_rms = 8.6 * amp_norm
else:
            raise ValueError(f'case={case} is not recognized')
# Get scale factor based on start and end angle solar elongation angles
scaling = self.slew_scaling(start_angle, end_angle)
wfe_drift_rms *= scaling
return wfe_drift_rms
def gen_thermal_drift(self, delta_time, start_angle=-5, end_angle=45, case='BOL'):
""" Thermal WFE drift scaling
Function to determine the factor to scale the delta OPD associated with
OTE backplane thermal distortion. Returns the RMS WFE (nm) depending on
time and slew angles.
Parameters
----------
delta_time : astropy.units quantity object
The time since a slew occurred.
start_angle : float
The starting sun pitch angle, in degrees between -5 and +45
end_angle : float
The ending sun pitch angle, in degrees between -5 and +45
case : string
either "BOL" for current best estimate at beginning of life, or
"EOL" for more conservative prediction at end of life. The amplitude
            of the thermal drift is roughly 3x lower for BOL (13 nm after 14 days)
versus EOL (43 nm after 14 days).
"""
thermal_hours = np.array([
0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21.,
22., 23., 24., 48., 72., 96., 120., 144., 168., 192., 216.,
240., 264., 288., 312., 336., 360., 384., 408., 432., 456., 480., 800.
])
thermal_wfe_drift_norm = np.array([
0.0000, 0.0134, 0.0259, 0.0375, 0.0484, 0.0587, 0.0685, 0.0777, 0.0865,
0.0950, 0.1031, 0.1109, 0.1185, 0.1259, 0.1330, 0.1400, 0.1468, 0.1534,
0.1600, 0.1664, 0.1727, 0.1789, 0.1850, 0.1910, 0.1970, 0.3243, 0.4315,
0.5227, 0.5999, 0.6650, 0.7197, 0.7655, 0.8038, 0.8358, 0.8625, 0.8849,
0.9035, 0.9191, 0.9322, 0.9431, 0.9522, 0.9598, 0.9662, 0.9716, 1.0000
])
# Create interpolation function
finterp = interp1d(thermal_hours, thermal_wfe_drift_norm,
kind='cubic', fill_value=(0, 1), bounds_error=False)
# Convert input time to hours and get normalized amplitude
time_hour = delta_time.to(u.hour).value
amp_norm = finterp(time_hour)
# Normalize to 14 days (336 hours)
amp_norm /= finterp(336)
# Scale height from either EOL or BOL (nm RMS)
# Assuming full slew angle from -5 to +45 deg
if case=='EOL':
wfe_drift_rms = 45.0 * amp_norm
elif case=='BOL':
wfe_drift_rms = 13.0 * amp_norm
else:
            raise ValueError(f'case={case} is not recognized')
# Get scale factor based on start and end angle solar elongation angles
scaling = self.slew_scaling(start_angle, end_angle)
wfe_drift_rms *= scaling
return wfe_drift_rms
def gen_iec_series(self, delta_time, amplitude=3.5, period=5.0,
interp_kind='linear', random_seed=None):
"""Create a series of IEC WFE scale factors
Create a series of random IEC heater state changes based on
arcsine distribution.
Parameters
----------
delta_time : astropy.units quantity object array
            Time series of astropy quantities at which to interpolate IEC amplitudes
Keyword Args
------------
amplitude : float
Full amplitude of arcsine distribution. Values will range
from -0.5*amplitude to +0.5*amplitude.
period : float
Period in minutes of IEC oscillations. Usually 3-5 minutes.
random_seed : int
Provide a random seed value between 0 and (2**32)-1 to generate
reproducible random values.
interp_kind : str or int
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
"""
# Convert time array to minutes and get values
if isinstance(delta_time, (u.Quantity)):
time_arr_minutes = np.array(delta_time.to(u.min).value)
else:
time_arr_minutes = delta_time
# Create a series of random IEC heater state changes based on arcsin distribution
dt = period
nsamp = int(np.max(time_arr_minutes)/dt) + 2
tvals = np.arange(nsamp) * dt
# Random values between 0 and 1
arcsine_rand = arcsine.rvs(size=nsamp, random_state=random_seed)
# Scale by amplitude
wfe_iec_all = arcsine_rand * amplitude - amplitude / 2
# res = np.interp(time_arr_minutes, tvals, wfe_iec_all)
finterp = interp1d(tvals, wfe_iec_all, kind=interp_kind,
fill_value=0, bounds_error=False)
res = finterp(time_arr_minutes)
return res
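    # Usage sketch (assuming `ote` is an instance of this class): reproducible
    # IEC amplitudes sampled every minute over one hour.
    #   >>> tvals = np.arange(61) * u.min
    #   >>> wfe_iec = ote.gen_iec_series(tvals, amplitude=3.5, period=5.0, random_seed=1234)
    #   >>> wfe_iec.shape   # one amplitude (nm RMS) per requested time
    #   (61,)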
def gen_delta_opds(self, delta_time, start_angle=-5, end_angle=45,
do_thermal=True, do_frill=True, do_iec=True,
case='BOL', return_wfe_amps=True, return_dopd_fin=True,
random_seed=None, **kwargs):
"""Create series of delta OPDs
Generate a series of delta OPDS, the result of which is
a combination of thermal, frill, and IEC effects. The
thermal and frill values are dependent on time, start/end
slew angles, and case ('BOL' or 'EOL'). Delta OPD contributions
from the IEC heater switching are treated as random state
switches assuming an arcsine distribution.
Parameters
----------
delta_time : astropy.units quantity object
An array of times assuming astropy units.
start_angle : float
The starting sun pitch angle, in degrees between -5 and +45.
end_angle : float
The ending sun pitch angle, in degrees between -5 and +45.
case : string
Either "BOL" for current best estimate at beginning of life, or
"EOL" for more conservative prediction at end of life.
do_thermal : bool
Include thermal slew component? Mostly for debugging purposes.
do_frill : bool
Include frill component? Mostly for debugging purposes.
do_iec : bool
Include IEC component? Good to exclude if calling this function
repeatedly for evolution of multiple slews, then add IEC later.
return_wfe_amps : bool
Return a dictionary that provides the RMS WFE (nm) of each
component at each time step.
return_dopd_fin : bool
Option to exclude calculating final delta OPD in case we only
want the final RMS WFE dictionary.
random_seed : int
Random seed to pass to IEC generation.
"""
if (not return_wfe_amps) and (not return_dopd_fin):
_log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`')
return
try:
nz = len(delta_time)
except TypeError:
nz = 1
ny,nx = self.opd.shape
# Thermal drift amplitudes
if do_thermal:
amp_thermal = self.gen_thermal_drift(delta_time, case=case,
start_angle=start_angle,
end_angle=end_angle)
else:
amp_thermal = np.zeros(nz) if nz>1 else 0
# Frill drift amplitudes
if do_frill:
amp_frill = self.gen_frill_drift(delta_time, case=case,
start_angle=start_angle,
end_angle=end_angle)
else:
amp_frill = np.zeros(nz) if nz>1 else 0
# Random IEC amplitudes
if do_iec:
amp_iec = self.gen_iec_series(delta_time, random_seed=random_seed, **kwargs)
if nz>1:
amp_iec[0] = 0
else:
amp_iec = np.zeros(nz) if nz>1 else 0
# Add OPD deltas
delta_opd_fin = np.zeros([nz,ny,nx])
if do_thermal:
amp = np.reshape(amp_thermal, [-1,1,1])
delta_opd_fin += self.dopd_thermal.reshape([1,ny,nx]) * amp
if do_frill:
amp = np.reshape(amp_frill, [-1,1,1])
delta_opd_fin += self.dopd_frill.reshape([1,ny,nx]) * amp
if do_iec:
amp = np.reshape(amp_iec, [-1,1,1])
delta_opd_fin += self.dopd_iec.reshape([1,ny,nx]) * amp
if nz==1:
delta_opd_fin = delta_opd_fin[0]
# Get final RMS in nm
rms_tot = np.array(self.calc_rms(delta_opd_fin)) * 1e9
wfe_amps = {
'thermal': amp_thermal,
'frill' : amp_frill,
'iec' : amp_iec,
'total' : rms_tot
}
if return_wfe_amps and return_dopd_fin:
return delta_opd_fin, wfe_amps
elif return_wfe_amps:
return wfe_amps
elif return_dopd_fin:
return delta_opd_fin
else:
_log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`')
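    # Usage sketch (assuming `ote` is an instance of this class): delta OPDs for
    # the first two days after a -5 to +45 deg slew, sampled hourly.
    #   >>> dt = np.arange(49) * u.hour
    #   >>> dopds, wfe = ote.gen_delta_opds(dt, start_angle=-5, end_angle=45, case='BOL')
    #   >>> dopds.shape        # (ntimes, ny, nx) cube of delta OPD images
    #   >>> wfe['total'][-1]   # total RMS WFE (nm) at t = 48 h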
def evolve_dopd(self, delta_time, slew_angles, case='BOL',
return_wfe_amps=True, return_dopd_fin=True,
do_thermal=True, do_frill=True, do_iec=True, **kwargs):
""" Evolve the delta OPD with multiple slews
Input an array of `delta_time` and `slew_angles` to return the
evolution of a delta_OPD image. Option to return the various
WFE components, including OTE backplane (thermal), frill tensioning,
and IEC heater switching.
Parameters
----------
delta_time : astropy.units quantity object
An array of times assuming astropy units.
slew_angles : ndarray
The sun pitch angles, in degrees between -5 and +45.
case : string
Either "BOL" for current best estimate at beginning of life, or
"EOL" for more conservative prediction at end of life.
do_thermal : bool
Include thermal slew component? Mostly for debugging purposes.
do_frill : bool
Include frill component? Mostly for debugging purposes.
do_iec : bool
Include IEC component? Good to exclude if calling this function
repeatedly for evolution of multiple slews, then add IEC later.
return_wfe_amps : bool
Return a dictionary that provides the RMS WFE (nm) of each
component at each time step.
return_dopd_fin : bool
Option to exclude calculating final delta OPD in case we only
want the final RMS WFE dictionary.
Keyword Args
------------
amplitude : float
Full amplitude of IEC arcsine distribution. Values will range
from -0.5*amplitude to +0.5*amplitude.
period : float
Period in minutes of IEC oscillations. Usually 3-5 minutes.
"""
if (not return_wfe_amps) and (not return_dopd_fin):
_log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`')
return
log_prev = conf.logging_level
if 'WARN' not in log_prev:
setup_logging('WARN', verbose=False)
# Indices where slews occur
islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1
islew = np.concatenate(([0], islew))
# Build delta OPDs for each slew angle
kwargs['case'] = case
kwargs['return_wfe_amps'] = return_wfe_amps
kwargs['return_dopd_fin'] = True
kwargs['do_thermal'] = do_thermal
kwargs['do_frill'] = do_frill
kwargs['do_iec'] = False
for i in tqdm(islew, desc='Slews'):
ang1 = slew_angles[0] if i==0 else ang2
ang2 = slew_angles[i]
tvals = delta_time[i:]
tvals = tvals - tvals[0]
res = self.gen_delta_opds(tvals, start_angle=ang1, end_angle=ang2, **kwargs)
if return_wfe_amps:
dopds, wfe_dict = res
else:
dopds = res
# Accumulate delta OPD images
if i==0:
dopds_fin = dopds + 0.0
else:
dopds_fin[i:] += dopds
# Add in drift amplitudes for thermal and frill components
if return_wfe_amps:
if i==0:
wfe_dict_fin = wfe_dict
else:
for k in wfe_dict.keys():
wfe_dict_fin[k][i:] += wfe_dict[k]
del dopds
# Get IEC values
if do_iec:
kwargs['do_thermal'] = False
kwargs['do_frill'] = False
kwargs['do_iec'] = True
res = self.gen_delta_opds(delta_time-delta_time[0], **kwargs)
if return_wfe_amps:
dopds, wfe_dict = res
wfe_dict_fin['iec'] = wfe_dict['iec']
else:
dopds = res
# Add IEC OPDs
dopds_fin += dopds
del dopds
if 'WARN' not in log_prev:
setup_logging(log_prev, verbose=False)
# Calculate RMS values on final delta OPDs
if return_wfe_amps:
wfe_dict_fin['total'] = self.calc_rms(dopds_fin)*1e9
if return_wfe_amps and return_dopd_fin:
return dopds_fin, wfe_dict_fin
elif return_dopd_fin:
return dopds_fin
elif return_wfe_amps:
return wfe_dict_fin
def interp_dopds(self, delta_time, dopds, dt_new, wfe_dict=None, interp_kind='linear', **kwargs):
""" Interpolate an array of delta OPDs
Perform a linear interpolation on a series of delta OPDS.
Parameters
----------
delta_time : astropy.units quantity object
An array of times assuming astropy units corresponding to each `dopd`.
dopds : ndarray
Array of delta OPD images associated with `delta_time`.
dt_new : astropy.units quantity object
New array to interpolate onto.
Keyword Args
------------
wfe_dict : dict or None
            If specified, must be a dictionary whose values are the WFE drift
            components associated with each `delta_time`. An interpolated
            version of this dictionary is then returned as well.
interp_kind : str or int
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
"""
dt_new_vals = dt_new.to('hour')
# Create interpolation function
dt_vals = delta_time.to('hour')
func = interp1d(dt_vals, dopds, axis=0, kind=interp_kind, bounds_error=True)
opds_new = func(dt_new_vals)
if wfe_dict is not None:
wfe_dict_new = {}
for k in wfe_dict.keys():
vals = wfe_dict[k]
func = interp1d(dt_vals, vals, kind=interp_kind, bounds_error=True)
wfe_dict_new[k] = func(dt_new_vals)
return opds_new, wfe_dict_new
else:
return opds_new
def slew_pos_averages(self, delta_time, slew_angles, opds=None, wfe_dict=None,
mn_func=np.mean, interpolate=False, **kwargs):
""" Get averages at each slew position
Given a series of times and slew angles, calculate the average OPD and
WFE RMS error within each slew angle position. Returns a tuple with new
arrays of (dt_new, opds_new, wfe_dict_new).
        If neither `opds` nor `wfe_dict` is specified, then `evolve_dopd` is
        called to generate them before averaging.
Parameters
----------
delta_time : astropy.units quantity object
An array of times assuming astropy units.
slew_angles : ndarray
The sun pitch angles at each `delta_time`, in degrees between -5 and +45.
opds : ndarray or None
Cube of OPD images (or delta OPDs) associated with each `delta_time`.
If set to None, then a new set of OPDs are not calculated.
wfe_dict : dict or None
            If specified, must be a dictionary whose values are the WFE drift
            components associated with each `delta_time`. No new WFE dictionary
            is calculated if set to None.
mn_func : function
Function to use for taking averages. Default: np.mean()
interpolate : bool
Instead of taking average, use the interpolation function `self.interp_dopds()`.
Keyword Args
------------
case : string
Either "BOL" for current best estimate at beginning of life, or
"EOL" for more conservative prediction at end of life.
do_thermal : bool
Include thermal slew component? Mostly for debugging purposes.
do_frill : bool
Include frill component? Mostly for debugging purposes.
do_iec : bool
Include IEC component? Good to exclude if calling this function
repeatedly for evolution of multiple slews, then add IEC later.
amplitude : float
Full amplitude of IEC arcsine distribution. Values will range
from -0.5*amplitude to +0.5*amplitude.
period : float
Period in minutes of IEC oscillations. Usually 3-5 minutes.
kind : str or int
Specifies the kind of interpolation (if specified) as a string.
Default: 'linear'.
"""
if (opds is None) and (wfe_dict is None):
kwargs['return_wfe_amps'] = True
kwargs['return_dopd_fin'] = True
opds, wfe_dict = self.evolve_dopd(delta_time, slew_angles, **kwargs)
# Indices where slews occur
islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1
# Start and stop indices for each slew position
i1_arr = np.concatenate(([0], islew))
i2_arr = np.concatenate((islew, [len(slew_angles)]))
# Get average time at each position
dt_new = np.array([mn_func(delta_time[i1:i2].value) for i1, i2 in zip(i1_arr, i2_arr)])
dt_new = dt_new * delta_time.unit
if interpolate:
res = self.interp_dopds(delta_time, opds, dt_new, wfe_dict=wfe_dict, **kwargs)
if wfe_dict is None:
opds_new = res
wfe_dict_new = None
else:
opds_new, wfe_dict_new = res
return dt_new, opds_new, wfe_dict_new
# Averages of OPD at each position
if opds is not None:
opds_new = np.array([mn_func(opds[i1:i2], axis=0) for i1, i2 in zip(i1_arr, i2_arr)])
else:
opds_new = None
# Get average of each WFE drift component
if wfe_dict is not None:
wfe_dict_new = {}
for k in wfe_dict.keys():
wfe_dict_new[k] = np.array([mn_func(wfe_dict[k][i1:i2]) for i1, i2 in zip(i1_arr, i2_arr)])
if opds_new is not None:
wfe_dict_new['total'] = self.calc_rms(opds_new)*1e9
        else:
            wfe_dict_new = None
return dt_new, opds_new, wfe_dict_new
def opds_as_hdul(self, delta_time, slew_angles, delta_opds=None, wfe_dict=None,
case=None, add_main_opd=True, slew_averages=False,
return_ind=None, **kwargs):
"""Convert series of delta OPDS to HDUList"""
if delta_opds is None:
case = 'BOL' if case is None else case
kwargs['case'] = case
kwargs['return_wfe_amps'] = True
kwargs['return_dopd_fin'] = True
delta_opds, wfe_dict = self.evolve_dopd(delta_time, slew_angles, **kwargs)
if slew_averages:
res = self.slew_pos_averages(delta_time, slew_angles, opds=delta_opds,
wfe_dict=wfe_dict, **kwargs)
delta_time, delta_opds, wfe_dict = res
# Indices where slews occur
islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1
islew = np.concatenate(([0], islew))
slew_angles = slew_angles[islew]
nz, ny, nx = delta_opds.shape
# Indices where slews occur
islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1
islew = np.concatenate(([0], islew))
hdul = fits.HDUList()
for i in range(nz):
if i<islew[1]:
ang1 = ang2 = slew_angles[i]
else:
if i in islew:
ang1 = slew_angles[i-1]
ang2 = slew_angles[i]
# Skip if only returning a single OPD
if (return_ind is not None) and (i != return_ind):
continue
# Update header
dt = delta_time[i].to(u.day).to_string()
opd_im = self._opd_original + delta_opds[i] if add_main_opd else delta_opds[i]
hdu = fits.ImageHDU(data=opd_im, header=self.opd_header, name=f'OPD{i}')
hdr = hdu.header
hdr['BUNIT'] = 'meter'
hdr['DELTA_T'] = (dt, "Delta time after initial slew [d]")
hdr['STARTANG'] = (ang1, "Starting sun pitch angle [deg]")
hdr['ENDANG'] = (ang2, "Ending sun pitch angle [deg]")
hdr['THRMCASE'] = (case, "Thermal model case, beginning or end of life")
# if add_main_opd:
# hdr['OPDSLICE'] = (self.opd_slice, 'OPD slice index')
hdr['WFE_RMS'] = (self.calc_rms(hdu.data)*1e9, "RMS WFE [nm]")
# Include the WFE RMS inputs from each component
if wfe_dict is not None:
for k in wfe_dict.keys():
hdr[k] = (wfe_dict[k][i], f"{k} RMS delta WFE [nm]")
hdul.append(hdu)
return hdul
def plot_im(im, fig, ax, vlim=None, add_cbar=True, return_ax=False,
extent=None, cmap='RdBu_r'):
"""
Plot single image on some axes
"""
if vlim is None:
vlim = np.max(np.abs(im))
img = ax.imshow(im, cmap=cmap, vmin=-1*vlim, vmax=+1*vlim, extent=extent)
# Add colorbar
if add_cbar:
cbar = fig.colorbar(img, ax=ax)
cbar.set_label('Amplitude [nm]')
if return_ax and add_cbar:
return ax, cbar
elif return_ax:
return ax
def plot_opd(hdul, index=1, opd0=None, vlim1=None, vlim2=None):
"""
Plot OPDs images (full or delta)
"""
def calc_rms_nm(im):
ind = (im != 0) & (np.isfinite(im))
rms = np.sqrt((im[ind] ** 2).mean()) * 1e9
return rms
m_to_nm = 1e9
# Define OPD to compare delta OPD image
opd0 = hdul[0].data if opd0 is None else opd0
# Header and data for current image
header = hdul[index].header
opd = hdul[index].data
opd_diff = (opd - opd0)
rms_opd = calc_rms_nm(opd)
rms_diff = calc_rms_nm(opd_diff)
# Time since slew
delta_time = header['DELTA_T']
try:
pupilscale = header['PUPLSCAL']
s = opd.shape
extent = [a * pupilscale for a in [-s[0] / 2, s[0] / 2, -s[1] / 2, s[1] / 2]]
except KeyError:
extent = None
# Create figure
fig, axes = plt.subplots(1,2, figsize=(12,5))
ax = axes[0]
vlim = 3*rms_opd if vlim1 is None else vlim1
plot_im(opd * m_to_nm, fig, ax, vlim=vlim, extent=extent)
    data_val, data_units = delta_time.split()
    data_val = float(data_val)
if 'h' in data_units:
dt = data_val * u.hr
elif 'm' in data_units:
dt = data_val * u.min
elif 'd' in data_units:
dt = data_val * u.day
# Convert to hours
dt = dt.to('hr')
ax.set_title("Delta Time = {:.1f} (RMS = {:.2f} nm)".format(dt, rms_opd))
ax = axes[1]
vlim = 3*rms_diff if vlim2 is None else vlim2
plot_im(opd_diff * m_to_nm, fig, ax, vlim=vlim, extent=extent)
ax.set_title("Delta OPD = {:.2f} nm RMS".format(rms_diff))
fig.tight_layout()
plt.draw()
def slew_time(dist_asec):
"""
Given a slew distance (arcsec), calculate telescope slew time. Output is sec.
Data comes from JDOX website:
https://jwst-docs.stsci.edu/jppom/visit-overheads-timing-model/slew-times.
"""
# Slew value in arcsec
slew_arr = np.array([
0, 0.06, 0.0600001, 15, 20, 20.0000001, 30, 50,
100, 150, 300, 1000, 3600, 4000, 10000, 10800,
10800, 14400, 18000, 21600, 25200, 28800, 32400, 36000, 39600,
43200, 46800, 50400, 54000, 57600, 61200, 64800, 68400, 72000,
108000, 144000, 180000, 216000, 252000, 288000, 324000, 360000,
396000, 432000, 468000, 504000, 540000, 576000, 612000, 648000
])
# Slew times
tsec_arr = np.array([
0, 0, 20.48, 20.48, 23.296, 101.632, 116.224, 137.728,
173.568, 198.656, 250.112, 373.504, 572.416, 592.896, 804.864, 825.6, 521.216,
578.048, 628.608, 674.56, 716.928, 756.608, 793.856, 829.184, 862.848, 894.976,
925.824, 955.648, 984.32, 1012.224, 1039.104, 1065.344, 1090.816, 1115.648,
1336.448, 1537.408,1744, 1939.328, 2112.192, 2278.272, 2440.32, 2599.936,
2757.632, 2914.24, 3069.888, 3224.832, 3379.328, 3533.376, 3687.104, 3840.512
])
return np.interp(dist_asec, slew_arr, tsec_arr)
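# Worked example: 100 arcsec appears directly in the lookup table above, so the
# interpolation returns the tabulated value; below 0.06 arcsec there is no overhead.
#   >>> slew_time(100)
#   173.568
#   >>> slew_time(0.05)
#   0.0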
|
nilq/baby-python
|
python
|
import argparse
import contextlib
import collections
import grp
import hashlib
import logging
import io
import json
import os
import os.path
import platform
import pwd
import re
import shlex
import signal
import socket
import stat
import subprocess
import sys
import textwrap
import threading
import time
import uuid
from binascii import hexlify
from collections import namedtuple, deque, abc, Counter
from datetime import datetime, timezone, timedelta
from functools import partial, lru_cache
from itertools import islice
from operator import attrgetter
from string import Formatter
from shutil import get_terminal_size
import msgpack
import msgpack.fallback
from .logger import create_logger
logger = create_logger()
import borg.crypto.low_level
from . import __version__ as borg_version
from . import __version_tuple__ as borg_version_tuple
from . import chunker
from . import hashindex
from .constants import * # NOQA
'''
The global exit_code variable is used so that modules other than archiver can increase the program exit code if a
warning or error occurred during their operation. This is different from archiver.exit_code, which is only accessible
from the archiver object.
'''
exit_code = EXIT_SUCCESS
def set_ec(ec):
'''
Sets the exit code of the program, if an exit code higher or equal than this is set, this does nothing. This
makes EXIT_ERROR override EXIT_WARNING, etc..
ec: exit code to set
'''
global exit_code
exit_code = max(exit_code, ec)
return exit_code
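# Usage sketch (borg's exit codes are ordered EXIT_SUCCESS < EXIT_WARNING < EXIT_ERROR):
#   >>> set_ec(EXIT_WARNING)   # escalate to warning
#   >>> set_ec(EXIT_ERROR)     # error overrides warning
#   >>> set_ec(EXIT_SUCCESS)   # no-op; exit_code stays at EXIT_ERROR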
class Error(Exception):
"""Error base class"""
    # If we raise such an Error and it is only caught by the uppermost
    # exception handler (that exits shortly after with the given exit_code),
    # it is always a (fatal and abrupt) EXIT_ERROR, never just a warning.
exit_code = EXIT_ERROR
# show a traceback?
traceback = False
def __init__(self, *args):
super().__init__(*args)
self.args = args
def get_message(self):
return type(self).__doc__.format(*self.args)
__str__ = get_message
class ErrorWithTraceback(Error):
"""like Error, but show a traceback also"""
traceback = True
class IntegrityError(ErrorWithTraceback):
"""Data integrity error: {}"""
class DecompressionError(IntegrityError):
"""Decompression error: {}"""
class ExtensionModuleError(Error):
"""The Borg binary extension modules do not seem to be properly installed"""
class NoManifestError(Error):
"""Repository has no manifest."""
class PlaceholderError(Error):
"""Formatting Error: "{}".format({}): {}({})"""
class InvalidPlaceholder(PlaceholderError):
"""Invalid placeholder "{}" in string: {}"""
class PythonLibcTooOld(Error):
"""FATAL: this Python was compiled for a too old (g)libc and misses required functionality."""
def check_python():
required_funcs = {os.stat, os.utime, os.chown}
if not os.supports_follow_symlinks.issuperset(required_funcs):
raise PythonLibcTooOld
def check_extension_modules():
from . import platform, compress, item
if hashindex.API_VERSION != '1.1_01':
raise ExtensionModuleError
if chunker.API_VERSION != '1.1_01':
raise ExtensionModuleError
if compress.API_VERSION != '1.1_03':
raise ExtensionModuleError
if borg.crypto.low_level.API_VERSION != '1.1_01':
raise ExtensionModuleError
if platform.API_VERSION != platform.OS_API_VERSION != '1.1_01':
raise ExtensionModuleError
if item.API_VERSION != '1.1_02':
raise ExtensionModuleError
ArchiveInfo = namedtuple('ArchiveInfo', 'name id ts')
class Archives(abc.MutableMapping):
"""
Nice wrapper around the archives dict, making sure only valid types/values get in
and we can deal with str keys (and it internally encodes to byte keys) and either
str timestamps or datetime timestamps.
"""
def __init__(self):
# key: encoded archive name, value: dict(b'id': bytes_id, b'time': bytes_iso_ts)
self._archives = {}
def __len__(self):
return len(self._archives)
def __iter__(self):
return iter(safe_decode(name) for name in self._archives)
def __getitem__(self, name):
assert isinstance(name, str)
_name = safe_encode(name)
values = self._archives.get(_name)
if values is None:
raise KeyError
ts = parse_timestamp(values[b'time'].decode('utf-8'))
return ArchiveInfo(name=name, id=values[b'id'], ts=ts)
def __setitem__(self, name, info):
assert isinstance(name, str)
name = safe_encode(name)
assert isinstance(info, tuple)
id, ts = info
assert isinstance(id, bytes)
if isinstance(ts, datetime):
ts = ts.replace(tzinfo=None).isoformat()
assert isinstance(ts, str)
ts = ts.encode()
self._archives[name] = {b'id': id, b'time': ts}
def __delitem__(self, name):
assert isinstance(name, str)
name = safe_encode(name)
del self._archives[name]
def list(self, sort_by=(), reverse=False, prefix='', first=None, last=None):
"""
Inexpensive Archive.list_archives replacement if we just need .name, .id, .ts
Returns list of borg.helpers.ArchiveInfo instances.
sort_by can be a list of sort keys, they are applied in reverse order.
"""
if isinstance(sort_by, (str, bytes)):
raise TypeError('sort_by must be a sequence of str')
archives = [x for x in self.values() if x.name.startswith(prefix)]
for sortkey in reversed(sort_by):
archives.sort(key=attrgetter(sortkey))
if reverse or last:
archives.reverse()
n = first or last or len(archives)
return archives[:n]
def list_considering(self, args):
"""
get a list of archives, considering --first/last/prefix/sort cmdline args
"""
if args.location.archive:
raise Error('The options --first, --last and --prefix can only be used on repository targets.')
return self.list(sort_by=args.sort_by.split(','), prefix=args.prefix, first=args.first, last=args.last)
def set_raw_dict(self, d):
"""set the dict we get from the msgpack unpacker"""
for k, v in d.items():
assert isinstance(k, bytes)
assert isinstance(v, dict) and b'id' in v and b'time' in v
self._archives[k] = v
def get_raw_dict(self):
"""get the dict we can give to the msgpack packer"""
return self._archives
class Manifest:
MANIFEST_ID = b'\0' * 32
def __init__(self, key, repository, item_keys=None):
self.archives = Archives()
self.config = {}
self.key = key
self.repository = repository
self.item_keys = frozenset(item_keys) if item_keys is not None else ITEM_KEYS
self.tam_verified = False
self.timestamp = None
@property
def id_str(self):
return bin_to_hex(self.id)
@property
def last_timestamp(self):
return datetime.strptime(self.timestamp, "%Y-%m-%dT%H:%M:%S.%f")
@classmethod
def load(cls, repository, key=None, force_tam_not_required=False):
from .item import ManifestItem
from .crypto.key import key_factory, tam_required_file, tam_required
from .repository import Repository
try:
cdata = repository.get(cls.MANIFEST_ID)
except Repository.ObjectNotFound:
raise NoManifestError
if not key:
key = key_factory(repository, cdata)
manifest = cls(key, repository)
data = key.decrypt(None, cdata)
manifest_dict, manifest.tam_verified = key.unpack_and_verify_manifest(data, force_tam_not_required=force_tam_not_required)
m = ManifestItem(internal_dict=manifest_dict)
manifest.id = key.id_hash(data)
if m.get('version') != 1:
raise ValueError('Invalid manifest version')
manifest.archives.set_raw_dict(m.archives)
manifest.timestamp = m.get('timestamp')
manifest.config = m.config
# valid item keys are whatever is known in the repo or every key we know
manifest.item_keys = ITEM_KEYS | frozenset(key.decode() for key in m.get('item_keys', []))
if manifest.tam_verified:
manifest_required = manifest.config.get(b'tam_required', False)
security_required = tam_required(repository)
if manifest_required and not security_required:
logger.debug('Manifest is TAM verified and says TAM is required, updating security database...')
file = tam_required_file(repository)
open(file, 'w').close()
if not manifest_required and security_required:
logger.debug('Manifest is TAM verified and says TAM is *not* required, updating security database...')
os.unlink(tam_required_file(repository))
return manifest, key
def write(self):
from .item import ManifestItem
if self.key.tam_required:
self.config[b'tam_required'] = True
# self.timestamp needs to be strictly monotonically increasing. Clocks often are not set correctly
if self.timestamp is None:
self.timestamp = datetime.utcnow().isoformat()
else:
prev_ts = self.last_timestamp
incremented = (prev_ts + timedelta(microseconds=1)).isoformat()
self.timestamp = max(incremented, datetime.utcnow().isoformat())
manifest = ManifestItem(
version=1,
archives=StableDict(self.archives.get_raw_dict()),
timestamp=self.timestamp,
config=StableDict(self.config),
item_keys=tuple(sorted(self.item_keys)),
)
self.tam_verified = True
data = self.key.pack_and_authenticate_metadata(manifest.as_dict())
self.id = self.key.id_hash(data)
self.repository.put(self.MANIFEST_ID, self.key.encrypt(data))
def prune_within(archives, within):
multiplier = {'H': 1, 'd': 24, 'w': 24 * 7, 'm': 24 * 31, 'y': 24 * 365}
try:
hours = int(within[:-1]) * multiplier[within[-1]]
except (KeyError, ValueError):
# I don't like how this displays the original exception too:
raise argparse.ArgumentTypeError('Unable to parse --keep-within option: "%s"' % within)
if hours <= 0:
raise argparse.ArgumentTypeError('Number specified using --keep-within option must be positive')
target = datetime.now(timezone.utc) - timedelta(seconds=hours * 3600)
return [a for a in archives if a.ts > target]
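# Example (assuming `archives` is a sequence of ArchiveInfo objects):
# '--keep-within 2d' translates to hours = 2 * 24 and keeps archives whose
# timestamp lies within the last 48 hours.
#   >>> recent = prune_within(archives, '2d')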
def prune_split(archives, pattern, n, skip=[]):
last = None
keep = []
if n == 0:
return keep
for a in sorted(archives, key=attrgetter('ts'), reverse=True):
period = to_localtime(a.ts).strftime(pattern)
if period != last:
last = period
if a not in skip:
keep.append(a)
if len(keep) == n:
break
return keep
def get_home_dir():
"""Get user's home directory while preferring a possibly set HOME
environment variable
"""
# os.path.expanduser() behaves differently for '~' and '~someuser' as
# parameters: when called with an explicit username, the possibly set
# environment variable HOME is no longer respected. So we have to check if
# it is set and only expand the user's home directory if HOME is unset.
if os.environ.get('HOME', ''):
return os.environ.get('HOME')
else:
return os.path.expanduser('~%s' % os.environ.get('USER', ''))
def get_keys_dir():
"""Determine where to repository keys and cache"""
xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(get_home_dir(), '.config'))
keys_dir = os.environ.get('BORG_KEYS_DIR', os.path.join(xdg_config, 'borg', 'keys'))
if not os.path.exists(keys_dir):
os.makedirs(keys_dir)
os.chmod(keys_dir, stat.S_IRWXU)
return keys_dir
def get_security_dir(repository_id=None):
"""Determine where to store local security information."""
xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(get_home_dir(), '.config'))
security_dir = os.environ.get('BORG_SECURITY_DIR', os.path.join(xdg_config, 'borg', 'security'))
if repository_id:
security_dir = os.path.join(security_dir, repository_id)
if not os.path.exists(security_dir):
os.makedirs(security_dir)
os.chmod(security_dir, stat.S_IRWXU)
return security_dir
def get_cache_dir():
"""Determine where to repository keys and cache"""
xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.join(get_home_dir(), '.cache'))
cache_dir = os.environ.get('BORG_CACHE_DIR', os.path.join(xdg_cache, 'borg'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
os.chmod(cache_dir, stat.S_IRWXU)
with open(os.path.join(cache_dir, CACHE_TAG_NAME), 'wb') as fd:
fd.write(CACHE_TAG_CONTENTS)
fd.write(textwrap.dedent("""
# This file is a cache directory tag created by Borg.
# For information about cache directory tags, see:
# http://www.brynosaurus.com/cachedir/
""").encode('ascii'))
return cache_dir
def to_localtime(ts):
"""Convert datetime object from UTC to local time zone"""
return datetime(*time.localtime((ts - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())[:6])
def parse_timestamp(timestamp):
"""Parse a ISO 8601 timestamp string"""
if '.' in timestamp: # microseconds might not be present
return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=timezone.utc)
else:
return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc)
def timestamp(s):
"""Convert a --timestamp=s argument to a datetime object"""
try:
# is it pointing to a file / directory?
ts = safe_s(os.stat(s).st_mtime)
return datetime.utcfromtimestamp(ts)
except OSError:
# didn't work, try parsing as timestamp. UTC, no TZ, no microsecs support.
for format in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S+00:00',
'%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M',
'%Y-%m-%d', '%Y-%j',
):
try:
return datetime.strptime(s, format)
except ValueError:
continue
raise ValueError
def ChunkerParams(s):
if s.strip().lower() == "default":
return CHUNKER_PARAMS
chunk_min, chunk_max, chunk_mask, window_size = s.split(',')
if int(chunk_max) > 23:
raise ValueError('max. chunk size exponent must not be more than 23 (2^23 = 8MiB max. chunk size)')
return int(chunk_min), int(chunk_max), int(chunk_mask), int(window_size)
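# Worked example: 'default' maps to CHUNKER_PARAMS; otherwise a 4-tuple is parsed.
#   >>> ChunkerParams('10,23,16,4095')
#   (10, 23, 16, 4095)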
def dir_is_cachedir(path):
"""Determines whether the specified path is a cache directory (and
therefore should potentially be excluded from the backup) according to
the CACHEDIR.TAG protocol
(http://www.brynosaurus.com/cachedir/spec.html).
"""
tag_path = os.path.join(path, CACHE_TAG_NAME)
try:
if os.path.exists(tag_path):
with open(tag_path, 'rb') as tag_file:
tag_data = tag_file.read(len(CACHE_TAG_CONTENTS))
if tag_data == CACHE_TAG_CONTENTS:
return True
except OSError:
pass
return False
def dir_is_tagged(path, exclude_caches, exclude_if_present):
"""Determines whether the specified path is excluded by being a cache
directory or containing user-specified tag files/directories. Returns a
list of the paths of the tag files/directories (either CACHEDIR.TAG or the
matching user-specified files/directories).
"""
tag_paths = []
if exclude_caches and dir_is_cachedir(path):
tag_paths.append(os.path.join(path, CACHE_TAG_NAME))
if exclude_if_present is not None:
for tag in exclude_if_present:
tag_path = os.path.join(path, tag)
if os.path.exists(tag_path):
tag_paths.append(tag_path)
return tag_paths
def partial_format(format, mapping):
"""
Apply format.format_map(mapping) while preserving unknown keys
Does not support attribute access, indexing and ![rsa] conversions
"""
for key, value in mapping.items():
key = re.escape(key)
format = re.sub(r'(?<!\{)((\{%s\})|(\{%s:[^\}]*\}))' % (key, key),
lambda match: match.group(1).format_map(mapping),
format)
return format
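# Worked example: known keys are substituted, unknown placeholders survive intact.
#   >>> partial_format('{hostname}-{now}', {'hostname': 'myhost'})
#   'myhost-{now}'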
class DatetimeWrapper:
def __init__(self, dt):
self.dt = dt
def __format__(self, format_spec):
if format_spec == '':
format_spec = '%Y-%m-%dT%H:%M:%S'
return self.dt.__format__(format_spec)
def format_line(format, data):
for _, key, _, conversion in Formatter().parse(format):
if not key:
continue
if conversion or key not in data:
raise InvalidPlaceholder(key, format)
try:
return format.format_map(data)
except Exception as e:
raise PlaceholderError(format, data, e.__class__.__name__, str(e))
def replace_placeholders(text):
"""Replace placeholders in text with their values."""
current_time = datetime.now()
data = {
'pid': os.getpid(),
'fqdn': socket.getfqdn(),
'hostname': socket.gethostname(),
'now': DatetimeWrapper(current_time.now()),
'utcnow': DatetimeWrapper(current_time.utcnow()),
'user': uid2user(os.getuid(), os.getuid()),
'uuid4': str(uuid.uuid4()),
'borgversion': borg_version,
'borgmajor': '%d' % borg_version_tuple[:1],
'borgminor': '%d.%d' % borg_version_tuple[:2],
'borgpatch': '%d.%d.%d' % borg_version_tuple[:3],
}
return format_line(text, data)
PrefixSpec = replace_placeholders
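# Usage sketch: expand placeholders in e.g. archive name templates; unknown
# placeholders raise InvalidPlaceholder via format_line() above.
#   >>> replace_placeholders('{hostname}-{now:%Y-%m-%d}')   # e.g. 'myhost-2017-06-01'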
HUMAN_SORT_KEYS = ['timestamp'] + list(ArchiveInfo._fields)
HUMAN_SORT_KEYS.remove('ts')
def SortBySpec(text):
for token in text.split(','):
if token not in HUMAN_SORT_KEYS:
raise ValueError('Invalid sort key: %s' % token)
return text.replace('timestamp', 'ts')
# Not too rarely, we get crappy timestamps from the fs, that overflow some computations.
# As they are crap anyway (valid filesystem timestamps always refer to the past up to
# the present, but never to the future), nothing is lost if we just clamp them to the
# maximum value we can support.
# As long as people are using borg on 32bit platforms to access borg archives, we must
# keep this value True. But we can expect that we can stop supporting 32bit platforms
# well before coming close to the year 2038, so this will never be a practical problem.
SUPPORT_32BIT_PLATFORMS = True # set this to False before y2038.
if SUPPORT_32BIT_PLATFORMS:
# second timestamps will fit into a signed int32 (platform time_t limit).
# nanosecond timestamps thus will naturally fit into a signed int64.
# subtract last 48h to avoid any issues that could be caused by tz calculations.
# this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
# msgpack can pack up to uint64.
MAX_S = 2**31-1 - 48*3600
MAX_NS = MAX_S * 1000000000
else:
# nanosecond timestamps will fit into a signed int64.
# subtract last 48h to avoid any issues that could be caused by tz calculations.
# this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
# round down to 1e9 multiple, so MAX_NS corresponds precisely to a integer MAX_S.
# msgpack can pack up to uint64.
MAX_NS = (2**63-1 - 48*3600*1000000000) // 1000000000 * 1000000000
MAX_S = MAX_NS // 1000000000
def safe_s(ts):
if 0 <= ts <= MAX_S:
return ts
elif ts < 0:
return 0
else:
return MAX_S
def safe_ns(ts):
if 0 <= ts <= MAX_NS:
return ts
elif ts < 0:
return 0
else:
return MAX_NS
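# Worked example: out-of-range timestamps are clamped rather than rejected.
#   >>> safe_s(-1)
#   0
#   >>> safe_s(2**40) == MAX_S
#   True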
def safe_timestamp(item_timestamp_ns):
t_ns = safe_ns(item_timestamp_ns)
return datetime.fromtimestamp(t_ns / 1e9)
def format_time(t):
"""use ISO-8601 date and time format
"""
return t.strftime('%a, %Y-%m-%d %H:%M:%S')
def format_timedelta(td):
"""Format timedelta in a human friendly format
"""
ts = td.total_seconds()
s = ts % 60
m = int(ts / 60) % 60
h = int(ts / 3600) % 24
txt = '%.2f seconds' % s
if m:
txt = '%d minutes %s' % (m, txt)
if h:
txt = '%d hours %s' % (h, txt)
if td.days:
txt = '%d days %s' % (td.days, txt)
return txt
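# Worked example:
#   >>> format_timedelta(timedelta(days=1, hours=2, minutes=3, seconds=4.5))
#   '1 days 2 hours 3 minutes 4.50 seconds'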
def format_file_size(v, precision=2, sign=False):
"""Format file size into a human friendly format
"""
return sizeof_fmt_decimal(v, suffix='B', sep=' ', precision=precision, sign=sign)
class FileSize(int):
def __format__(self, format_spec):
return format_file_size(int(self)).__format__(format_spec)
def parse_file_size(s):
"""Return int from file size (1234, 55G, 1.7T)."""
if not s:
return int(s) # will raise
suffix = s[-1]
power = 1000
try:
factor = {
'K': power,
'M': power**2,
'G': power**3,
'T': power**4,
'P': power**5,
}[suffix]
s = s[:-1]
except KeyError:
factor = 1
return int(float(s) * factor)
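# Worked examples (decimal SI suffixes, powers of 1000):
#   >>> parse_file_size('1234')
#   1234
#   >>> parse_file_size('55G')
#   55000000000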
def sizeof_fmt(num, suffix='B', units=None, power=None, sep='', precision=2, sign=False):
prefix = '+' if sign and num > 0 else ''
for unit in units[:-1]:
if abs(round(num, precision)) < power:
if isinstance(num, int):
return "{}{}{}{}{}".format(prefix, num, sep, unit, suffix)
else:
return "{}{:3.{}f}{}{}{}".format(prefix, num, precision, sep, unit, suffix)
num /= float(power)
return "{}{:.{}f}{}{}{}".format(prefix, num, precision, sep, units[-1], suffix)
def sizeof_fmt_iec(num, suffix='B', sep='', precision=2, sign=False):
return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign,
units=['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'], power=1024)
def sizeof_fmt_decimal(num, suffix='B', sep='', precision=2, sign=False):
return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign,
units=['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'], power=1000)
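# Worked examples via the wrappers above (decimal vs. IEC binary units):
#   >>> format_file_size(1234567)
#   '1.23 MB'
#   >>> sizeof_fmt_iec(1234567, sep=' ')
#   '1.18 MiB'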
def format_archive(archive):
return '%-36s %s [%s]' % (
archive.name,
format_time(to_localtime(archive.ts)),
bin_to_hex(archive.id),
)
class Buffer:
"""
provide a thread-local buffer
"""
class MemoryLimitExceeded(Error, OSError):
"""Requested buffer size {} is above the limit of {}."""
def __init__(self, allocator, size=4096, limit=None):
"""
Initialize the buffer: use allocator(size) call to allocate a buffer.
Optionally, set the upper <limit> for the buffer size.
"""
assert callable(allocator), 'must give alloc(size) function as first param'
assert limit is None or size <= limit, 'initial size must be <= limit'
self._thread_local = threading.local()
self.allocator = allocator
self.limit = limit
self.resize(size, init=True)
def __len__(self):
return len(self._thread_local.buffer)
def resize(self, size, init=False):
"""
resize the buffer - to avoid frequent reallocation, we usually always grow (if needed).
giving init=True it is possible to first-time initialize or shrink the buffer.
if a buffer size beyond the limit is requested, raise Buffer.MemoryLimitExceeded (OSError).
"""
size = int(size)
if self.limit is not None and size > self.limit:
raise Buffer.MemoryLimitExceeded(size, self.limit)
if init or len(self) < size:
self._thread_local.buffer = self.allocator(size)
def get(self, size=None, init=False):
"""
return a buffer of at least the requested size (None: any current size).
init=True can be given to trigger shrinking of the buffer to the given size.
"""
if size is not None:
self.resize(size, init)
return self._thread_local.buffer
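# Usage sketch for Buffer (illustrative values, not part of the original module):
#   buf = Buffer(bytearray, size=4096, limit=2 ** 20)
#   data = buf.get(8192)   # grows the thread-local buffer to 8192 bytes
#   buf.get(2 ** 21)       # would raise Buffer.MemoryLimitExceeded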
@lru_cache(maxsize=None)
def uid2user(uid, default=None):
try:
return pwd.getpwuid(uid).pw_name
except KeyError:
return default
@lru_cache(maxsize=None)
def user2uid(user, default=None):
try:
return user and pwd.getpwnam(user).pw_uid
except KeyError:
return default
@lru_cache(maxsize=None)
def gid2group(gid, default=None):
try:
return grp.getgrgid(gid).gr_name
except KeyError:
return default
@lru_cache(maxsize=None)
def group2gid(group, default=None):
try:
return group and grp.getgrnam(group).gr_gid
except KeyError:
return default
def posix_acl_use_stored_uid_gid(acl):
"""Replace the user/group field with the stored uid/gid
"""
entries = []
for entry in safe_decode(acl).split('\n'):
if entry:
fields = entry.split(':')
if len(fields) == 4:
entries.append(':'.join([fields[0], fields[3], fields[2]]))
else:
entries.append(entry)
return safe_encode('\n'.join(entries))
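# Example transformation (hypothetical ACL entry): 'user:joe:rw-:1000' becomes
# 'user:1000:rw-', i.e. the name field is replaced by the stored uid/gid field.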
def safe_decode(s, coding='utf-8', errors='surrogateescape'):
"""decode bytes to str, with round-tripping "invalid" bytes"""
if s is None:
return None
return s.decode(coding, errors)
def safe_encode(s, coding='utf-8', errors='surrogateescape'):
"""encode str to bytes, with round-tripping "invalid" bytes"""
if s is None:
return None
return s.encode(coding, errors)
def bin_to_hex(binary):
return hexlify(binary).decode('ascii')
class Location:
"""Object representing a repository / archive location
"""
proto = user = _host = port = path = archive = None
# user must not contain "@", ":" or "/".
# Quoting adduser error message:
# "To avoid problems, the username should consist only of letters, digits,
# underscores, periods, at signs and dashes, and not start with a dash
# (as defined by IEEE Std 1003.1-2001)."
# We use "@" as separator between username and hostname, so we must
# disallow it within the pure username part.
optional_user_re = r"""
(?:(?P<user>[^@:/]+)@)?
"""
# path must not contain :: (it ends at :: or string end), but may contain single colons.
# to avoid ambiguities with other regexes, it must also not start with ":" nor with "//" nor with "ssh://".
scp_path_re = r"""
(?!(:|//|ssh://)) # not starting with ":" or // or ssh://
(?P<path>([^:]|(:(?!:)))+) # any chars, but no "::"
"""
# file_path must not contain :: (it ends at :: or string end), but may contain single colons.
# it must start with a / and that slash is part of the path.
file_path_re = r"""
(?P<path>(([^/]*)/([^:]|(:(?!:)))+)) # start opt. servername, then /, then any chars, but no "::"
"""
# abs_path must not contain :: (it ends at :: or string end), but may contain single colons.
# it must start with a / and that slash is part of the path.
abs_path_re = r"""
(?P<path>(/([^:]|(:(?!:)))+)) # start with /, then any chars, but no "::"
"""
# optional ::archive_name at the end, archive name must not contain "/".
# borg mount's FUSE filesystem creates one level of directories from
# the archive names and of course "/" is not valid in a directory name.
optional_archive_re = r"""
(?:
:: # "::" as separator
(?P<archive>[^/]+) # archive name must not contain "/"
)?$""" # must match until the end
# regexes for misc. kinds of supported location specifiers:
ssh_re = re.compile(r"""
(?P<proto>ssh):// # ssh://
""" + optional_user_re + r""" # user@ (optional)
(?P<host>([^:/]+|\[[0-9a-fA-F:.]+\]))(?::(?P<port>\d+))? # host or host:port or [ipv6] or [ipv6]:port
""" + abs_path_re + optional_archive_re, re.VERBOSE) # path or path::archive
file_re = re.compile(r"""
(?P<proto>file):// # file://
""" + file_path_re + optional_archive_re, re.VERBOSE) # servername/path, path or path::archive
    # note: scp_re is also used for local paths
scp_re = re.compile(r"""
(
""" + optional_user_re + r""" # user@ (optional)
(?P<host>([^:/]+|\[[0-9a-fA-F:.]+\])): # host: (don't match / or [ipv6] in host to disambiguate from file:)
)? # user@host: part is optional
""" + scp_path_re + optional_archive_re, re.VERBOSE) # path with optional archive
# get the repo from BORG_REPO env and the optional archive from param.
# if the syntax requires giving REPOSITORY (see "borg mount"),
# use "::" to let it use the env var.
# if REPOSITORY argument is optional, it'll automatically use the env.
env_re = re.compile(r""" # the repo part is fetched from BORG_REPO
(?:::$) # just "::" is ok (when a pos. arg is required, no archive)
| # or
""" + optional_archive_re, re.VERBOSE) # archive name (optional, may be empty)
def __init__(self, text=''):
self.orig = text
if not self.parse(self.orig):
raise ValueError('Location: parse failed: %s' % self.orig)
def parse(self, text):
text = replace_placeholders(text)
valid = self._parse(text)
if valid:
return True
m = self.env_re.match(text)
if not m:
return False
repo = os.environ.get('BORG_REPO')
if repo is None:
return False
valid = self._parse(repo)
if not valid:
return False
self.archive = m.group('archive')
return True
def _parse(self, text):
def normpath_special(p):
            # prevent normpath from stripping away our relative path hack and even making p absolute
relative = p.startswith('/./')
p = os.path.normpath(p)
return ('/.' + p) if relative else p
m = self.ssh_re.match(text)
if m:
self.proto = m.group('proto')
self.user = m.group('user')
self._host = m.group('host')
self.port = m.group('port') and int(m.group('port')) or None
self.path = normpath_special(m.group('path'))
self.archive = m.group('archive')
return True
m = self.file_re.match(text)
if m:
self.proto = m.group('proto')
self.path = normpath_special(m.group('path'))
self.archive = m.group('archive')
return True
m = self.scp_re.match(text)
if m:
self.user = m.group('user')
self._host = m.group('host')
self.path = normpath_special(m.group('path'))
self.archive = m.group('archive')
self.proto = self._host and 'ssh' or 'file'
return True
return False
def __str__(self):
items = [
'proto=%r' % self.proto,
'user=%r' % self.user,
'host=%r' % self.host,
'port=%r' % self.port,
'path=%r' % self.path,
'archive=%r' % self.archive,
]
return ', '.join(items)
def to_key_filename(self):
        name = re.sub(r'[^\w]', '_', self.path).strip('_')
        if self.proto != 'file':
            name = re.sub(r'[^\w]', '_', self.host) + '__' + name
if len(name) > 100:
# Limit file names to some reasonable length. Most file systems
# limit them to 255 [unit of choice]; due to variations in unicode
# handling we truncate to 100 *characters*.
name = name[:100]
return os.path.join(get_keys_dir(), name)
def __repr__(self):
return "Location(%s)" % self
@property
def host(self):
# strip square brackets used for IPv6 addrs
if self._host is not None:
return self._host.lstrip('[').rstrip(']')
def canonical_path(self):
if self.proto == 'file':
return self.path
else:
if self.path and self.path.startswith('~'):
path = '/' + self.path # /~/x = path x relative to home dir
elif self.path and not self.path.startswith('/'):
path = '/./' + self.path # /./x = path x relative to cwd
else:
path = self.path
return 'ssh://{}{}{}{}'.format('{}@'.format(self.user) if self.user else '',
self._host, # needed for ipv6 addrs
':{}'.format(self.port) if self.port else '',
path)
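# Illustrative parse results (the example values are assumptions, not from the source):
#   Location('ssh://user@host:2222/srv/repo::arch') yields proto='ssh', user='user',
#       host='host', port=2222, path='/srv/repo', archive='arch'
#   Location('/srv/repo::arch') matches the scp-style regex and yields proto='file',
#       path='/srv/repo', archive='arch'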
def location_validator(archive=None):
def validator(text):
try:
loc = Location(text)
except ValueError:
raise argparse.ArgumentTypeError('Invalid location format: "%s"' % text) from None
if archive is True and not loc.archive:
raise argparse.ArgumentTypeError('"%s": No archive specified' % text)
elif archive is False and loc.archive:
raise argparse.ArgumentTypeError('"%s" No archive can be specified' % text)
return loc
return validator
def archivename_validator():
def validator(text):
if '/' in text or '::' in text or not text:
            raise argparse.ArgumentTypeError('Invalid archive name: "%s"' % text)
return text
return validator
def decode_dict(d, keys, encoding='utf-8', errors='surrogateescape'):
for key in keys:
if isinstance(d.get(key), bytes):
d[key] = d[key].decode(encoding, errors)
return d
def prepare_dump_dict(d):
def decode_bytes(value):
# this should somehow be reversible later, but usual strings should
# look nice and chunk ids should mostly show in hex. Use a special
# inband signaling character (ASCII DEL) to distinguish between
# decoded and hex mode.
if not value.startswith(b'\x7f'):
try:
value = value.decode()
return value
except UnicodeDecodeError:
pass
return '\u007f' + bin_to_hex(value)
def decode_tuple(t):
res = []
for value in t:
if isinstance(value, dict):
value = decode(value)
elif isinstance(value, tuple) or isinstance(value, list):
value = decode_tuple(value)
elif isinstance(value, bytes):
value = decode_bytes(value)
res.append(value)
return res
def decode(d):
res = collections.OrderedDict()
for key, value in d.items():
if isinstance(value, dict):
value = decode(value)
elif isinstance(value, (tuple, list)):
value = decode_tuple(value)
elif isinstance(value, bytes):
value = decode_bytes(value)
if isinstance(key, bytes):
key = key.decode()
res[key] = value
return res
return decode(d)
def remove_surrogates(s, errors='replace'):
"""Replace surrogates generated by fsdecode with '?'
"""
return s.encode('utf-8', errors).decode('utf-8')
_safe_re = re.compile(r'^((\.\.)?/+)+')
def make_path_safe(path):
"""Make path safe by making it relative and local
"""
return _safe_re.sub('', path) or '.'
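# Examples, derived from the regex above:
#   make_path_safe('/etc/passwd') == 'etc/passwd'
#   make_path_safe('../../etc/passwd') == 'etc/passwd'
#   make_path_safe('/') == '.'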
def daemonize():
"""Detach process from controlling terminal and run in background
"""
pid = os.fork()
if pid:
os._exit(0)
os.setsid()
pid = os.fork()
if pid:
os._exit(0)
os.chdir('/')
os.close(0)
os.close(1)
os.close(2)
fd = os.open(os.devnull, os.O_RDWR)
os.dup2(fd, 0)
os.dup2(fd, 1)
os.dup2(fd, 2)
class StableDict(dict):
"""A dict subclass with stable items() ordering"""
def items(self):
return sorted(super().items())
def bigint_to_int(mtime):
"""Convert bytearray to int
"""
if isinstance(mtime, bytes):
return int.from_bytes(mtime, 'little', signed=True)
return mtime
def int_to_bigint(value):
"""Convert integers larger than 64 bits to bytearray
Smaller integers are left alone
"""
if value.bit_length() > 63:
return value.to_bytes((value.bit_length() + 9) // 8, 'little', signed=True)
return value
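# Round-trip sketch (illustrative values): int_to_bigint(2 ** 70) yields a 10-byte
# little-endian signed bytes object, and bigint_to_int() converts it back:
#   bigint_to_int(int_to_bigint(2 ** 70)) == 2 ** 70
#   int_to_bigint(42) == 42   # small ints are passed through unchanged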
def is_slow_msgpack():
return msgpack.Packer is msgpack.fallback.Packer
FALSISH = ('No', 'NO', 'no', 'N', 'n', '0', )
TRUISH = ('Yes', 'YES', 'yes', 'Y', 'y', '1', )
DEFAULTISH = ('Default', 'DEFAULT', 'default', 'D', 'd', '', )
def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
retry_msg=None, invalid_msg=None, env_msg='{} (from {})',
falsish=FALSISH, truish=TRUISH, defaultish=DEFAULTISH,
default=False, retry=True, env_var_override=None, ofile=None, input=input, prompt=True,
msgid=None):
"""Output <msg> (usually a question) and let user input an answer.
Qualifies the answer according to falsish, truish and defaultish as True, False or <default>.
If it didn't qualify and retry is False (no retries wanted), return the default [which
defaults to False]. If retry is True let user retry answering until answer is qualified.
If env_var_override is given and this var is present in the environment, do not ask
the user, but just use the env var contents as answer as if it was typed in.
Otherwise read input from stdin and proceed as normal.
    If EOF is received instead of an input, or an invalid input is given without
    retry possibility, return default.
:param msg: introducing message to output on ofile, no \n is added [None]
:param retry_msg: retry message to output on ofile, no \n is added [None]
:param false_msg: message to output before returning False [None]
:param true_msg: message to output before returning True [None]
:param default_msg: message to output before returning a <default> [None]
    :param invalid_msg: message to output after an invalid answer was given [None]
:param env_msg: message to output when using input from env_var_override ['{} (from {})'],
needs to have 2 placeholders for answer and env var name
:param falsish: sequence of answers qualifying as False
:param truish: sequence of answers qualifying as True
:param defaultish: sequence of answers qualifying as <default>
:param default: default return value (defaultish answer was given or no-answer condition) [False]
:param retry: if True and input is incorrect, retry. Otherwise return default. [True]
:param env_var_override: environment variable name [None]
:param ofile: output stream [sys.stderr]
:param input: input function [input from builtins]
:return: boolean answer value, True or False
"""
def output(msg, msg_type, is_prompt=False, **kwargs):
json_output = getattr(logging.getLogger('borg'), 'json', False)
if json_output:
kwargs.update(dict(
type='question_%s' % msg_type,
msgid=msgid,
message=msg,
))
print(json.dumps(kwargs), file=sys.stderr)
else:
if is_prompt:
print(msg, file=ofile, end='', flush=True)
else:
print(msg, file=ofile)
msgid = msgid or env_var_override
# note: we do not assign sys.stderr as default above, so it is
# really evaluated NOW, not at function definition time.
if ofile is None:
ofile = sys.stderr
if default not in (True, False):
raise ValueError("invalid default value, must be True or False")
if msg:
output(msg, 'prompt', is_prompt=True)
while True:
answer = None
if env_var_override:
answer = os.environ.get(env_var_override)
if answer is not None and env_msg:
output(env_msg.format(answer, env_var_override), 'env_answer', env_var=env_var_override)
if answer is None:
if not prompt:
return default
try:
answer = input()
except EOFError:
# avoid defaultish[0], defaultish could be empty
answer = truish[0] if default else falsish[0]
if answer in defaultish:
if default_msg:
output(default_msg, 'accepted_default')
return default
if answer in truish:
if true_msg:
output(true_msg, 'accepted_true')
return True
if answer in falsish:
if false_msg:
output(false_msg, 'accepted_false')
return False
# if we get here, the answer was invalid
if invalid_msg:
output(invalid_msg, 'invalid_answer')
if not retry:
return default
if retry_msg:
output(retry_msg, 'prompt_retry', is_prompt=True)
# in case we used an environment variable and it gave an invalid answer, do not use it again:
env_var_override = None
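# Example invocation (illustrative; the prompt text and env var name are made up):
#   if yes('Really delete the repository? [y/N] ', default=False,
#          env_var_override='BORG_EXAMPLE_ASSUME_YES'):
#       ...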
def hostname_is_unique():
return yes(env_var_override='BORG_HOSTNAME_IS_UNIQUE', prompt=False, env_msg=None, default=True)
def ellipsis_truncate(msg, space):
"""
    shorten a long string by replacing its middle with an ellipsis and return it, example:
this_is_a_very_long_string -------> this_is..._string
"""
from .platform import swidth
ellipsis_width = swidth('...')
msg_width = swidth(msg)
if space < 8:
# if there is very little space, just show ...
return '...' + ' ' * (space - ellipsis_width)
if space < ellipsis_width + msg_width:
return '%s...%s' % (swidth_slice(msg, space // 2 - ellipsis_width),
swidth_slice(msg, -space // 2))
return msg + ' ' * (space - msg_width)
class ProgressIndicatorBase:
LOGGER = 'borg.output.progress'
JSON_TYPE = None
json = False
operation_id_counter = 0
@classmethod
def operation_id(cls):
"""Unique number, can be used by receiving applications to distinguish different operations."""
cls.operation_id_counter += 1
return cls.operation_id_counter
def __init__(self, msgid=None):
self.handler = None
self.logger = logging.getLogger(self.LOGGER)
self.id = self.operation_id()
self.msgid = msgid
# If there are no handlers, set one up explicitly because the
        # terminator and propagation need to be set. If there are,
# they must have been set up by BORG_LOGGING_CONF: skip setup.
if not self.logger.handlers:
self.handler = logging.StreamHandler(stream=sys.stderr)
self.handler.setLevel(logging.INFO)
logger = logging.getLogger('borg')
# Some special attributes on the borg logger, created by setup_logging
# But also be able to work without that
try:
formatter = logger.formatter
terminator = '\n' if logger.json else '\r'
self.json = logger.json
except AttributeError:
terminator = '\r'
else:
self.handler.setFormatter(formatter)
self.handler.terminator = terminator
self.logger.addHandler(self.handler)
if self.logger.level == logging.NOTSET:
self.logger.setLevel(logging.WARN)
self.logger.propagate = False
# If --progress is not set then the progress logger level will be WARN
# due to setup_implied_logging (it may be NOTSET with a logging config file,
# but the interactions there are generally unclear), so self.emit becomes
# False, which is correct.
# If --progress is set then the level will be INFO as per setup_implied_logging;
# note that this is always the case for serve processes due to a "args.progress |= is_serve".
# In this case self.emit is True.
self.emit = self.logger.getEffectiveLevel() == logging.INFO
def __del__(self):
if self.handler is not None:
self.logger.removeHandler(self.handler)
self.handler.close()
def output_json(self, *, finished=False, **kwargs):
assert self.json
if not self.emit:
return
kwargs.update(dict(
operation=self.id,
msgid=self.msgid,
type=self.JSON_TYPE,
finished=finished,
time=time.time(),
))
print(json.dumps(kwargs), file=sys.stderr)
def finish(self):
if self.json:
self.output_json(finished=True)
else:
self.output('')
def justify_to_terminal_size(message):
terminal_space = get_terminal_size(fallback=(-1, -1))[0]
# justify only if we are outputting to a terminal
if terminal_space != -1:
return message.ljust(terminal_space)
return message
class ProgressIndicatorMessage(ProgressIndicatorBase):
JSON_TYPE = 'progress_message'
def output(self, msg):
if self.json:
self.output_json(message=msg)
else:
self.logger.info(justify_to_terminal_size(msg))
class ProgressIndicatorPercent(ProgressIndicatorBase):
JSON_TYPE = 'progress_percent'
def __init__(self, total=0, step=5, start=0, msg="%3.0f%%", msgid=None):
"""
Percentage-based progress indicator
:param total: total amount of items
:param step: step size in percent
:param start: at which percent value to start
:param msg: output message, must contain one %f placeholder for the percentage
"""
self.counter = 0 # 0 .. (total-1)
self.total = total
self.trigger_at = start # output next percentage value when reaching (at least) this
self.step = step
self.msg = msg
super().__init__(msgid=msgid)
def progress(self, current=None, increase=1):
if current is not None:
self.counter = current
pct = self.counter * 100 / self.total
self.counter += increase
if pct >= self.trigger_at:
self.trigger_at += self.step
return pct
def show(self, current=None, increase=1, info=None):
"""
Show and output the progress message
        :param current: set the current counter value [None]
        :param increase: amount by which to increase the counter [1]
        :param info: array of strings to be formatted with msg [None]
"""
pct = self.progress(current, increase)
if pct is not None:
# truncate the last argument, if no space is available
if info is not None:
if not self.json:
                    # no need to truncate if we're not outputting to a terminal
terminal_space = get_terminal_size(fallback=(-1, -1))[0]
if terminal_space != -1:
space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + ['']))
info[-1] = ellipsis_truncate(info[-1], space)
return self.output(self.msg % tuple([pct] + info), justify=False, info=info)
return self.output(self.msg % pct)
def output(self, message, justify=True, info=None):
if self.json:
self.output_json(message=message, current=self.counter, total=self.total, info=info)
else:
if justify:
message = justify_to_terminal_size(message)
self.logger.info(message)
class ProgressIndicatorEndless:
def __init__(self, step=10, file=None):
"""
Progress indicator (long row of dots)
:param step: every Nth call, call the func
:param file: output file, default: sys.stderr
"""
self.counter = 0 # call counter
self.triggered = 0 # increases 1 per trigger event
self.step = step # trigger every <step> calls
if file is None:
file = sys.stderr
self.file = file
def progress(self):
self.counter += 1
trigger = self.counter % self.step == 0
if trigger:
self.triggered += 1
return trigger
def show(self):
trigger = self.progress()
if trigger:
return self.output(self.triggered)
def output(self, triggered):
print('.', end='', file=self.file, flush=True)
def finish(self):
print(file=self.file)
def sysinfo():
info = []
info.append('Platform: %s' % (' '.join(platform.uname()), ))
if sys.platform.startswith('linux'):
info.append('Linux: %s %s %s' % platform.linux_distribution())
info.append('Borg: %s Python: %s %s' % (borg_version, platform.python_implementation(), platform.python_version()))
info.append('PID: %d CWD: %s' % (os.getpid(), os.getcwd()))
info.append('sys.argv: %r' % sys.argv)
info.append('SSH_ORIGINAL_COMMAND: %r' % os.environ.get('SSH_ORIGINAL_COMMAND'))
info.append('')
return '\n'.join(info)
def log_multi(*msgs, level=logging.INFO, logger=logger):
"""
    log multiple lines of text, each line by a separate logging call, for cosmetic reasons.
    Each positional argument may be a single or multiple lines (separated by newlines) of text.
"""
lines = []
for msg in msgs:
lines.extend(msg.splitlines())
for line in lines:
logger.log(level, line)
class BaseFormatter:
FIXED_KEYS = {
# Formatting aids
'LF': '\n',
'SPACE': ' ',
'TAB': '\t',
'CR': '\r',
'NUL': '\0',
'NEWLINE': os.linesep,
'NL': os.linesep,
}
def get_item_data(self, item):
raise NotImplementedError
def format_item(self, item):
return self.format.format_map(self.get_item_data(item))
@staticmethod
def keys_help():
return " - NEWLINE: OS dependent line separator\n" \
" - NL: alias of NEWLINE\n" \
" - NUL: NUL character for creating print0 / xargs -0 like output, see barchive/bpath\n" \
" - SPACE\n" \
" - TAB\n" \
" - CR\n" \
" - LF"
class ArchiveFormatter(BaseFormatter):
def __init__(self, format):
self.format = partial_format(format, self.FIXED_KEYS)
def get_item_data(self, archive):
return {
# *name* is the key used by borg-info for the archive name, this makes the formats more compatible
'name': remove_surrogates(archive.name),
'barchive': archive.name,
'archive': remove_surrogates(archive.name),
'id': bin_to_hex(archive.id),
'time': format_time(to_localtime(archive.ts)),
# *start* is the key used by borg-info for this timestamp, this makes the formats more compatible
'start': format_time(to_localtime(archive.ts)),
}
@staticmethod
def keys_help():
return " - archive, name: archive name interpreted as text (might be missing non-text characters, see barchive)\n" \
" - barchive: verbatim archive name, can contain any character except NUL\n" \
" - time: time of creation of the archive\n" \
" - id: internal ID of the archive"
class ItemFormatter(BaseFormatter):
KEY_DESCRIPTIONS = {
'bpath': 'verbatim POSIX path, can contain any character except NUL',
'path': 'path interpreted as text (might be missing non-text characters, see bpath)',
'source': 'link target for links (identical to linktarget)',
'extra': 'prepends {source} with " -> " for soft links and " link to " for hard links',
'csize': 'compressed size',
'dsize': 'deduplicated size',
'dcsize': 'deduplicated compressed size',
'num_chunks': 'number of chunks in this file',
'unique_chunks': 'number of unique chunks in this file',
'health': 'either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks)',
}
KEY_GROUPS = (
('type', 'mode', 'uid', 'gid', 'user', 'group', 'path', 'bpath', 'source', 'linktarget', 'flags'),
('size', 'csize', 'dsize', 'dcsize', 'num_chunks', 'unique_chunks'),
('mtime', 'ctime', 'atime', 'isomtime', 'isoctime', 'isoatime'),
tuple(sorted(hashlib.algorithms_guaranteed)),
('archiveid', 'archivename', 'extra'),
('health', )
)
KEYS_REQUIRING_CACHE = (
'dsize', 'dcsize', 'unique_chunks',
)
@classmethod
def available_keys(cls):
class FakeArchive:
fpr = name = ""
from .item import Item
fake_item = Item(mode=0, path='', user='', group='', mtime=0, uid=0, gid=0)
formatter = cls(FakeArchive, "")
keys = []
keys.extend(formatter.call_keys.keys())
keys.extend(formatter.get_item_data(fake_item).keys())
return keys
@classmethod
def keys_help(cls):
help = []
keys = cls.available_keys()
for key in cls.FIXED_KEYS:
keys.remove(key)
for group in cls.KEY_GROUPS:
for key in group:
keys.remove(key)
text = " - " + key
if key in cls.KEY_DESCRIPTIONS:
text += ": " + cls.KEY_DESCRIPTIONS[key]
help.append(text)
help.append("")
assert not keys, str(keys)
return "\n".join(help)
@classmethod
def format_needs_cache(cls, format):
format_keys = {f[1] for f in Formatter().parse(format)}
return any(key in cls.KEYS_REQUIRING_CACHE for key in format_keys)
def __init__(self, archive, format, *, json_lines=False):
self.archive = archive
self.json_lines = json_lines
static_keys = {
'archivename': archive.name,
'archiveid': archive.fpr,
}
static_keys.update(self.FIXED_KEYS)
self.format = partial_format(format, static_keys)
self.format_keys = {f[1] for f in Formatter().parse(format)}
self.call_keys = {
'size': self.calculate_size,
'csize': self.calculate_csize,
'dsize': partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.size),
'dcsize': partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.csize),
'num_chunks': self.calculate_num_chunks,
'unique_chunks': partial(self.sum_unique_chunks_metadata, lambda chunk: 1),
'isomtime': partial(self.format_time, 'mtime'),
'isoctime': partial(self.format_time, 'ctime'),
'isoatime': partial(self.format_time, 'atime'),
'mtime': partial(self.time, 'mtime'),
'ctime': partial(self.time, 'ctime'),
'atime': partial(self.time, 'atime'),
}
for hash_function in hashlib.algorithms_guaranteed:
self.add_key(hash_function, partial(self.hash_item, hash_function))
self.used_call_keys = set(self.call_keys) & self.format_keys
if self.json_lines:
self.item_data = {}
self.format_item = self.format_item_json
else:
self.item_data = static_keys
def format_item_json(self, item):
return json.dumps(self.get_item_data(item)) + '\n'
def add_key(self, key, callable_with_item):
self.call_keys[key] = callable_with_item
self.used_call_keys = set(self.call_keys) & self.format_keys
def get_item_data(self, item):
mode = stat.filemode(item.mode)
item_type = mode[0]
item_data = self.item_data
source = item.get('source', '')
extra = ''
if source:
source = remove_surrogates(source)
if item_type == 'l':
extra = ' -> %s' % source
else:
mode = 'h' + mode[1:]
extra = ' link to %s' % source
item_data['type'] = item_type
item_data['mode'] = mode
item_data['user'] = item.user or item.uid
item_data['group'] = item.group or item.gid
item_data['uid'] = item.uid
item_data['gid'] = item.gid
item_data['path'] = remove_surrogates(item.path)
if self.json_lines:
item_data['healthy'] = 'chunks_healthy' not in item
else:
item_data['bpath'] = item.path
item_data['extra'] = extra
item_data['health'] = 'broken' if 'chunks_healthy' in item else 'healthy'
item_data['source'] = source
item_data['linktarget'] = source
item_data['flags'] = item.get('bsdflags')
for key in self.used_call_keys:
item_data[key] = self.call_keys[key](item)
return item_data
def sum_unique_chunks_metadata(self, metadata_func, item):
"""
        Sum metadata over the item's unique chunks. A unique chunk is a chunk which is
        referenced globally exactly as often as it is referenced within the item.
        item: the item whose unique chunks' metadata is summed
        metadata_func: a function that takes a ChunkIndexEntry and returns a number, used
        to extract the needed metadata from each chunk
"""
chunk_index = self.archive.cache.chunks
chunks = item.get('chunks', [])
chunks_counter = Counter(c.id for c in chunks)
return sum(metadata_func(c) for c in chunks if chunk_index[c.id].refcount == chunks_counter[c.id])
def calculate_num_chunks(self, item):
return len(item.get('chunks', []))
def calculate_size(self, item):
# note: does not support hardlink slaves, they will be size 0
return item.get_size(compressed=False)
def calculate_csize(self, item):
# note: does not support hardlink slaves, they will be csize 0
return item.get_size(compressed=True)
def hash_item(self, hash_function, item):
if 'chunks' not in item:
return ""
hash = hashlib.new(hash_function)
for data in self.archive.pipeline.fetch_many([c.id for c in item.chunks]):
hash.update(data)
return hash.hexdigest()
def format_time(self, key, item):
return format_time(safe_timestamp(item.get(key) or item.mtime))
def time(self, key, item):
return safe_timestamp(item.get(key) or item.mtime)
class ChunkIteratorFileWrapper:
"""File-like wrapper for chunk iterators"""
def __init__(self, chunk_iterator, read_callback=None):
"""
*chunk_iterator* should be an iterator yielding bytes. These will be buffered
internally as necessary to satisfy .read() calls.
*read_callback* will be called with one argument, some byte string that has
just been read and will be subsequently returned to a caller of .read().
It can be used to update a progress display.
"""
self.chunk_iterator = chunk_iterator
self.chunk_offset = 0
self.chunk = b''
self.exhausted = False
self.read_callback = read_callback
def _refill(self):
remaining = len(self.chunk) - self.chunk_offset
if not remaining:
try:
chunk = next(self.chunk_iterator)
self.chunk = memoryview(chunk)
except StopIteration:
self.exhausted = True
return 0 # EOF
self.chunk_offset = 0
remaining = len(self.chunk)
return remaining
def _read(self, nbytes):
if not nbytes:
return b''
remaining = self._refill()
will_read = min(remaining, nbytes)
self.chunk_offset += will_read
return self.chunk[self.chunk_offset - will_read:self.chunk_offset]
def read(self, nbytes):
parts = []
while nbytes and not self.exhausted:
read_data = self._read(nbytes)
nbytes -= len(read_data)
parts.append(read_data)
if self.read_callback:
self.read_callback(read_data)
return b''.join(parts)
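# Usage sketch (illustrative chunk iterator, not from the source):
#   f = ChunkIteratorFileWrapper(iter([b'abc', b'def']))
#   f.read(4)  == b'abcd'
#   f.read(10) == b'ef'   # the iterator is exhausted afterwards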
def open_item(archive, item):
"""Return file-like object for archived item (with chunks)."""
chunk_iterator = archive.pipeline.fetch_many([c.id for c in item.chunks])
return ChunkIteratorFileWrapper(chunk_iterator)
def file_status(mode):
if stat.S_ISREG(mode):
return 'A'
elif stat.S_ISDIR(mode):
return 'd'
elif stat.S_ISBLK(mode):
return 'b'
elif stat.S_ISCHR(mode):
return 'c'
elif stat.S_ISLNK(mode):
return 's'
elif stat.S_ISFIFO(mode):
return 'f'
return '?'
def hardlinkable(mode):
"""return True if we support hardlinked items of this type"""
return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode)
def chunkit(it, size):
"""
Chunk an iterator <it> into pieces of <size>.
    >>> list(chunkit('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
iterable = iter(it)
return iter(lambda: list(islice(iterable, size)), [])
def consume(iterator, n=None):
"""Advance the iterator n-steps ahead. If n is none, consume entirely."""
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
# GenericDirEntry, scandir_generic (c) 2012 Ben Hoyt
# from the python-scandir package (3-clause BSD license, just like us, so no troubles here)
# note: simplified version
class GenericDirEntry:
__slots__ = ('name', '_scandir_path', '_path')
def __init__(self, scandir_path, name):
self._scandir_path = scandir_path
self.name = name
self._path = None
@property
def path(self):
if self._path is None:
self._path = os.path.join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
assert not follow_symlinks
return os.stat(self.path, follow_symlinks=follow_symlinks)
def _check_type(self, type):
st = self.stat(False)
return stat.S_IFMT(st.st_mode) == type
def is_dir(self, follow_symlinks=True):
assert not follow_symlinks
return self._check_type(stat.S_IFDIR)
def is_file(self, follow_symlinks=True):
assert not follow_symlinks
return self._check_type(stat.S_IFREG)
def is_symlink(self):
return self._check_type(stat.S_IFLNK)
def inode(self):
st = self.stat(False)
return st.st_ino
def __repr__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.path)
def scandir_generic(path='.'):
"""Like os.listdir(), but yield DirEntry objects instead of returning a list of names."""
for name in sorted(os.listdir(path)):
yield GenericDirEntry(path, name)
try:
from os import scandir
except ImportError:
try:
# Try python-scandir on Python 3.4
from scandir import scandir
except ImportError:
# If python-scandir is not installed, then use a version that is just as slow as listdir.
scandir = scandir_generic
def scandir_inorder(path='.'):
return sorted(scandir(path), key=lambda dirent: dirent.inode())
def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True):
"""
clean lines (usually read from a config file):
1. strip whitespace (left and right), 2. remove empty lines, 3. remove comments.
note: only "pure comment lines" are supported, no support for "trailing comments".
:param lines: input line iterator (e.g. list or open text file) that gives unclean input lines
:param lstrip: lstrip call arguments or False, if lstripping is not desired
:param rstrip: rstrip call arguments or False, if rstripping is not desired
:param remove_comments: remove comment lines (lines starting with "#")
:param remove_empty: remove empty lines
:return: yields processed lines
"""
for line in lines:
if lstrip is not False:
line = line.lstrip(lstrip)
if rstrip is not False:
line = line.rstrip(rstrip)
if remove_empty and not line:
continue
if remove_comments and line.startswith('#'):
continue
yield line
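# Usage sketch (illustrative input lines):
#   list(clean_lines(['# a comment', '', '  value  '])) == ['value']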
class ErrorIgnoringTextIOWrapper(io.TextIOWrapper):
def read(self, n):
if not self.closed:
try:
return super().read(n)
except BrokenPipeError:
try:
super().close()
except OSError:
pass
return ''
def write(self, s):
if not self.closed:
try:
return super().write(s)
except BrokenPipeError:
try:
super().close()
except OSError:
pass
return len(s)
class SignalException(BaseException):
"""base class for all signal-based exceptions"""
class SigHup(SignalException):
"""raised on SIGHUP signal"""
class SigTerm(SignalException):
"""raised on SIGTERM signal"""
@contextlib.contextmanager
def signal_handler(sig, handler):
"""
when entering context, set up signal handler <handler> for signal <sig>.
when leaving context, restore original signal handler.
    <sig> can be either a str naming a signal.SIGXXX attribute (this won't
    crash if the attribute name does not exist, as some names are platform
    specific) or an int giving a signal number.
<handler> is any handler value as accepted by the signal.signal(sig, handler).
"""
if isinstance(sig, str):
sig = getattr(signal, sig, None)
if sig is not None:
orig_handler = signal.signal(sig, handler)
try:
yield
finally:
if sig is not None:
signal.signal(sig, orig_handler)
def raising_signal_handler(exc_cls):
def handler(sig_no, frame):
        # setting SIG_IGN avoids a second incoming signal of this kind
        # raising a 2nd exception while we are still processing the
        # exception handler for exc_cls for the 1st signal.
signal.signal(sig_no, signal.SIG_IGN)
raise exc_cls
return handler
def swidth_slice(string, max_width):
"""
Return a slice of *max_width* cells from *string*.
Negative *max_width* means from the end of string.
*max_width* is in units of character cells (or "columns").
Latin characters are usually one cell wide, many CJK characters are two cells wide.
"""
from .platform import swidth
reverse = max_width < 0
max_width = abs(max_width)
if reverse:
string = reversed(string)
current_swidth = 0
result = []
for character in string:
current_swidth += swidth(character)
if current_swidth > max_width:
break
result.append(character)
if reverse:
result.reverse()
return ''.join(result)
class BorgJsonEncoder(json.JSONEncoder):
def default(self, o):
from .repository import Repository
from .remote import RemoteRepository
from .archive import Archive
from .cache import Cache
if isinstance(o, Repository) or isinstance(o, RemoteRepository):
return {
'id': bin_to_hex(o.id),
'location': o._location.canonical_path(),
}
if isinstance(o, Archive):
return o.info()
if isinstance(o, Cache):
return {
'path': o.path,
'stats': o.stats(),
}
return super().default(o)
def basic_json_data(manifest, *, cache=None, extra=None):
key = manifest.key
data = extra or {}
data.update({
'repository': BorgJsonEncoder().default(manifest.repository),
'encryption': {
'mode': key.ARG_NAME,
},
})
data['repository']['last_modified'] = format_time(to_localtime(manifest.last_timestamp.replace(tzinfo=timezone.utc)))
if key.NAME.startswith('key file'):
data['encryption']['keyfile'] = key.find_key()
if cache:
data['cache'] = cache
return data
def json_dump(obj):
"""Dump using BorgJSONEncoder."""
return json.dumps(obj, sort_keys=True, indent=4, cls=BorgJsonEncoder)
def json_print(obj):
print(json_dump(obj))
def secure_erase(path):
"""Attempt to securely erase a file by writing random data over it before deleting it."""
with open(path, 'r+b') as fd:
length = os.stat(fd.fileno()).st_size
fd.write(os.urandom(length))
fd.flush()
os.fsync(fd.fileno())
os.unlink(path)
def truncate_and_unlink(path):
"""
Truncate and then unlink *path*.
Do not create *path* if it does not exist.
Open *path* for truncation in r+b mode (=O_RDWR|O_BINARY).
Use this when deleting potentially large files when recovering
from a VFS error such as ENOSPC. It can help a full file system
recover. Refer to the "File system interaction" section
in repository.py for further explanations.
"""
with open(path, 'r+b') as fd:
fd.truncate()
os.unlink(path)
def popen_with_error_handling(cmd_line: str, log_prefix='', **kwargs):
"""
Handle typical errors raised by subprocess.Popen. Return None if an error occurred,
otherwise return the Popen object.
*cmd_line* is split using shlex (e.g. 'gzip -9' => ['gzip', '-9']).
Log messages will be prefixed with *log_prefix*; if set, it should end with a space
(e.g. log_prefix='--some-option: ').
Does not change the exit code.
"""
assert not kwargs.get('shell'), 'Sorry pal, shell mode is a no-no'
try:
command = shlex.split(cmd_line)
if not command:
raise ValueError('an empty command line is not permitted')
except ValueError as ve:
logger.error('%s%s', log_prefix, ve)
return
logger.debug('%scommand line: %s', log_prefix, command)
try:
return subprocess.Popen(command, **kwargs)
except FileNotFoundError:
logger.error('%sexecutable not found: %s', log_prefix, command[0])
return
except PermissionError:
logger.error('%spermission denied: %s', log_prefix, command[0])
return
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for USGSDEM driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008-2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
from osgeo import osr
sys.path.append('../pymod')
import gdaltest
###############################################################################
# Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/022gdeme
def usgsdem_1():
tst = gdaltest.GDALTest('USGSDEM', '022gdeme_truncated', 1, 1583)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('NAD27')
return tst.testOpen(check_prj=srs.ExportToWkt(),
check_gt=(-67.00041667, 0.00083333, 0.0, 50.000416667, 0.0, -0.00083333))
###############################################################################
# Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/114p01_0100_deme.dem
def usgsdem_2():
tst = gdaltest.GDALTest('USGSDEM', '114p01_0100_deme_truncated.dem', 1, 53864)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('NAD27')
return tst.testOpen(check_prj=srs.ExportToWkt(),
check_gt=(-136.25010416667, 0.000208333, 0.0, 59.25010416667, 0.0, -0.000208333))
###############################################################################
# Test truncated version of file that triggered bug #2348
def usgsdem_3():
tst = gdaltest.GDALTest('USGSDEM', '39079G6_truncated.dem', 1, 61424)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS72')
srs.SetUTM(17)
return tst.testOpen(check_prj=srs.ExportToWkt(),
check_gt=(606855.0, 30.0, 0.0, 4414605.0, 0.0, -30.0))
###############################################################################
# Test CreateCopy()
def usgsdem_4():
tst = gdaltest.GDALTest('USGSDEM', '39079G6_truncated.dem', 1, 61424,
options=['RESAMPLE=Nearest'])
return tst.testCreateCopy(check_gt=1, check_srs=1, vsimem=1)
###############################################################################
# Test CreateCopy() with only the RESAMPLE creation option
def usgsdem_5():
ds = gdal.Open('data/n43.dt0')
ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/n43.dem', ds,
options=['RESAMPLE=Nearest'])
if ds.GetRasterBand(1).Checksum() != ds2.GetRasterBand(1).Checksum():
gdaltest.post_reason('Bad checksum.')
print(ds2.GetRasterBand(1).Checksum())
print(ds.GetRasterBand(1).Checksum())
ds2 = None
print(open('tmp/n43.dem', 'rb').read())
return 'fail'
gt1 = ds.GetGeoTransform()
gt2 = ds2.GetGeoTransform()
for i in range(6):
if abs(gt1[i] - gt2[i]) > 1e-5:
print('')
print('old = ', gt1)
print('new = ', gt2)
gdaltest.post_reason('Geotransform differs.')
return 'fail'
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
if ds2.GetProjectionRef() != srs.ExportToWkt():
gdaltest.post_reason('Bad SRS.')
return 'fail'
ds2 = None
return 'success'
###############################################################################
# Test CreateCopy() with a few creation options. Then create a new copy with TEMPLATE
# creation option and check that both files are binary identical.
def usgsdem_6():
ds = gdal.Open('data/n43.dt0')
ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/file_1.dem', ds,
options=['PRODUCER=GDAL', 'OriginCode=GDAL', 'ProcessCode=A',
'RESAMPLE=Nearest'])
ds3 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/file_2.dem', ds2,
options=['TEMPLATE=tmp/file_1.dem', 'RESAMPLE=Nearest'])
del ds2
del ds3
f1 = open('tmp/file_1.dem', 'rb')
f2 = open('tmp/file_2.dem', 'rb')
    # Skip the first 40 bytes because the dataset name will differ
f1.seek(40, 0)
f2.seek(40, 0)
data1 = f1.read()
data2 = f2.read()
if data1 != data2:
return 'fail'
f1.close()
f2.close()
return 'success'
###############################################################################
# Test CreateCopy() with CDED50K profile
def usgsdem_7():
ds = gdal.Open('data/n43.dt0')
# To avoid warning about 'Unable to find NTS mapsheet lookup file: NTS-50kindex.csv'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/000a00DEMz', ds,
options=['PRODUCT=CDED50K', 'TOPLEFT=80w,44n', 'RESAMPLE=Nearest', 'ZRESOLUTION=1.1', 'INTERNALNAME=GDAL'])
gdal.PopErrorHandler()
if ds2.RasterXSize != 1201 or ds2.RasterYSize != 1201:
gdaltest.post_reason('Bad image dimensions.')
print(ds2.RasterXSize)
print(ds2.RasterYSize)
return 'fail'
expected_gt = (-80.000104166666674, 0.000208333333333, 0, 44.000104166666667, 0, -0.000208333333333)
got_gt = ds2.GetGeoTransform()
for i in range(6):
if abs(expected_gt[i] - got_gt[i]) > 1e-5:
print('')
print('expected = ', expected_gt)
print('got = ', got_gt)
gdaltest.post_reason('Geotransform differs.')
return 'fail'
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('NAD83')
if ds2.GetProjectionRef() != srs.ExportToWkt():
gdaltest.post_reason('Bad SRS.')
return 'fail'
ds2 = None
return 'success'
###############################################################################
# Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/various.zip/39109h1.dem
# Undocumented format
def usgsdem_8():
tst = gdaltest.GDALTest('USGSDEM', '39109h1_truncated.dem', 1, 39443)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('NAD27')
srs.SetUTM(12)
return tst.testOpen(check_prj=srs.ExportToWkt(),
check_gt=(660055.0, 10.0, 0.0, 4429465.0, 0.0, -10.0))
###############################################################################
# Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/various.zip/4619old.dem
# Old format
def usgsdem_9():
tst = gdaltest.GDALTest('USGSDEM', '4619old_truncated.dem', 1, 10659)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('NAD27')
return tst.testOpen(check_prj=srs.ExportToWkt(),
check_gt=(18.99958333, 0.0008333, 0.0, 47.000416667, 0.0, -0.0008333))
###############################################################################
# https://github.com/OSGeo/gdal/issues/583
def usgsdem_with_extra_values_at_end_of_profile():
tst = gdaltest.GDALTest('USGSDEM', 'usgsdem_with_extra_values_at_end_of_profile.dem', 1, 56679)
return tst.testOpen()
###############################################################################
# Like Novato.dem of https://trac.osgeo.org/gdal/ticket/4901
def usgsdem_with_spaces_after_byte_864():
tst = gdaltest.GDALTest('USGSDEM', 'usgsdem_with_spaces_after_byte_864.dem', 1, 61078)
return tst.testOpen()
###############################################################################
# Cleanup
def usgsdem_cleanup():
try:
os.remove('tmp/n43.dem')
os.remove('tmp/n43.dem.aux.xml')
os.remove('tmp/file_1.dem')
os.remove('tmp/file_1.dem.aux.xml')
os.remove('tmp/file_2.dem')
os.remove('tmp/file_2.dem.aux.xml')
os.remove('tmp/000a00DEMz')
os.remove('tmp/000a00DEMz.aux.xml')
except OSError:
pass
return 'success'
gdaltest_list = [
usgsdem_1,
usgsdem_2,
usgsdem_3,
usgsdem_4,
usgsdem_5,
usgsdem_6,
usgsdem_7,
usgsdem_8,
usgsdem_9,
usgsdem_with_extra_values_at_end_of_profile,
usgsdem_with_spaces_after_byte_864,
usgsdem_cleanup]
if __name__ == '__main__':
gdaltest.setup_run('usgsdem')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
|
nilq/baby-python
|
python
|
import tensorflow as tf
class GLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs, **kwargs):
channels = tf.shape(inputs)[-1]
nb_split_channels = channels // 2
x_1 = inputs[:, :, :, :nb_split_channels]
x_2 = inputs[:, :, :, nb_split_channels:]
return x_1 * tf.nn.sigmoid(x_2)
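# Usage sketch (assumed NHWC input with an even channel count; values illustrative):
#   x = tf.random.normal([2, 8, 8, 16])
#   y = GLU()(x)   # y.shape == (2, 8, 8, 8): first half gated by sigmoid of second half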
|
nilq/baby-python
|
python
|
import aiohttp
import asyncio
import sys
import json
import argparse
async def upload_cast_info(session, addr, cast):
async with session.post(addr + "/wrk2-api/cast-info/write", json=cast) as resp:
return await resp.text()
async def upload_plot(session, addr, plot):
async with session.post(addr + "/wrk2-api/plot/write", json=plot) as resp:
return await resp.text()
async def upload_movie_info(session, addr, movie):
async with session.post(addr + "/wrk2-api/movie-info/write", json=movie) as resp:
return await resp.text()
async def register_movie(session, addr, movie):
params = {
"title": movie["title"],
"movie_id": movie["movie_id"]
}
async with session.post(addr + "/wrk2-api/movie/register", data=params) as resp:
return await resp.text()
async def write_cast_info(addr, raw_casts):
idx = 0
tasks = []
conn = aiohttp.TCPConnector(limit=200)
async with aiohttp.ClientSession(connector=conn) as session:
for raw_cast in raw_casts:
try:
cast = dict()
cast["cast_info_id"] = raw_cast["id"]
cast["name"] = raw_cast["name"]
cast["gender"] = True if raw_cast["gender"] == 2 else False
cast["intro"] = raw_cast["biography"]
task = asyncio.ensure_future(upload_cast_info(session, addr, cast))
tasks.append(task)
idx += 1
            except KeyError:
                print("Warning: cast info missing!")
if idx % 200 == 0:
resps = await asyncio.gather(*tasks)
print(idx, "casts finished")
resps = await asyncio.gather(*tasks)
print(idx, "casts finished")
async def write_movie_info(addr, raw_movies):
idx = 0
tasks = []
conn = aiohttp.TCPConnector(limit=200)
async with aiohttp.ClientSession(connector=conn) as session:
for raw_movie in raw_movies:
movie = dict()
casts = list()
movie["movie_id"] = str(raw_movie["id"])
movie["title"] = raw_movie["title"]
movie["plot_id"] = raw_movie["id"]
for raw_cast in raw_movie["cast"]:
try:
cast = dict()
cast["cast_id"] = raw_cast["cast_id"]
cast["character"] = raw_cast["character"]
cast["cast_info_id"] = raw_cast["id"]
casts.append(cast)
                except KeyError:
                    print("Warning: cast info missing!")
movie["casts"] = casts
movie["thumbnail_ids"] = [raw_movie["poster_path"]]
movie["photo_ids"] = []
movie["video_ids"] = []
movie["avg_rating"] = raw_movie["vote_average"]
movie["num_rating"] = raw_movie["vote_count"]
task = asyncio.ensure_future(upload_movie_info(session, addr, movie))
tasks.append(task)
plot = dict()
plot["plot_id"] = raw_movie["id"]
plot["plot"] = raw_movie["overview"]
task = asyncio.ensure_future(upload_plot(session, addr, plot))
tasks.append(task)
task = asyncio.ensure_future(register_movie(session, addr, movie))
tasks.append(task)
idx += 1
if idx % 200 == 0:
resps = await asyncio.gather(*tasks)
print(idx, "movies finished")
resps = await asyncio.gather(*tasks)
print(idx, "movies finished")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cast", action="store", dest="cast_filename",
type=str, default="../datasets/tmdb/casts.json")
parser.add_argument("-m", "--movie", action="store", dest="movie_filename",
type=str, default="../datasets/tmdb/movies.json")
args = parser.parse_args()
with open(args.cast_filename, 'r') as cast_file:
raw_casts = json.load(cast_file)
addr = "http://127.0.0.1:8080"
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(write_cast_info(addr, raw_casts))
loop.run_until_complete(future)
with open(args.movie_filename, 'r') as movie_file:
raw_movies = json.load(movie_file)
addr = "http://127.0.0.1:8080"
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(write_movie_info(addr, raw_movies))
loop.run_until_complete(future)
|
nilq/baby-python
|
python
|
"""Tests experiment modules."""
|
nilq/baby-python
|
python
|
import pytest
import json
from pytz import UnknownTimeZoneError
from tzlocal import get_localzone
from O365.connection import Connection, Protocol, MSGraphProtocol, MSOffice365Protocol, DEFAULT_SCOPES
TEST_SCOPES = ['Contacts.Read.Shared', 'Mail.Send.Shared', 'User.Read', 'Contacts.ReadWrite.Shared', 'Mail.ReadWrite.Shared', 'Mail.Read.Shared', 'Contacts.Read', 'Sites.ReadWrite.All', 'Mail.Send', 'Mail.ReadWrite', 'offline_access', 'Mail.Read', 'Contacts.ReadWrite', 'Files.ReadWrite.All', 'Calendars.ReadWrite', 'User.ReadBasic.All']
class TestProtocol:
def setup_class(self):
self.proto = Protocol(protocol_url="testing", api_version="0.0")
def teardown_class(self):
pass
def test_blank_protocol(self):
with pytest.raises(ValueError):
p = Protocol()
def test_to_api_case(self):
assert(self.proto.to_api_case("CaseTest") == "case_test")
def test_get_scopes_for(self):
with pytest.raises(ValueError):
            self.proto.get_scopes_for(123)  # should error since it's not a list or tuple.
assert(self.proto.get_scopes_for(['mailbox']) == ['mailbox'])
assert(self.proto.get_scopes_for(None) == [])
assert(self.proto.get_scopes_for('mailbox') == ['mailbox'])
self.proto._oauth_scopes = DEFAULT_SCOPES
assert(self.proto.get_scopes_for(['mailbox']) == ['Mail.Read'])
        # This test verifies that the scopes in the default list don't change
        # without us noticing. It makes sure that all the scopes we get back
        # are in the current set of scopes we expect, and that all the scopes
        # we expect are in the scopes we get back. The lists contain the same
        # items but may not be in the same order, and are therefore not equal.
scopes = self.proto.get_scopes_for(None)
for scope in scopes:
assert(scope in TEST_SCOPES)
for scope in TEST_SCOPES:
assert(scope in scopes)
assert(self.proto.get_scopes_for('mailbox') == ['Mail.Read'])
def test_prefix_scope(self):
assert(self.proto.prefix_scope('Mail.Read') == 'Mail.Read')
assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read')
self.proto.protocol_scope_prefix = 'test_prefix_'
assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read')
assert(self.proto.prefix_scope('test_prefix_Mail.Read') == 'test_prefix_Mail.Read')
assert(self.proto.prefix_scope('Mail.Read') == 'test_prefix_Mail.Read')
def test_decendant_MSOffice365Protocol(self):
# Basically we just test that it can create the class w/o erroring.
msp = MSOffice365Protocol()
        # Make sure these don't change without us noticing.
assert(msp.keyword_data_store['message_type'] == 'Microsoft.OutlookServices.Message')
assert(msp.keyword_data_store['file_attachment_type'] == '#Microsoft.OutlookServices.FileAttachment')
assert(msp.keyword_data_store['item_attachment_type'] == '#Microsoft.OutlookServices.ItemAttachment')
assert(msp.max_top_value == 999)
|
nilq/baby-python
|
python
|
import os
import dgl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import numpy as np
from sklearn.model_selection import KFold
import digital_patient
from digital_patient.conformal.base import RegressorAdapter
from digital_patient.conformal.icp import IcpRegressor
from digital_patient.conformal.nc import RegressorNc
from examples.load_data2 import load_physiology
def main():
# create directory to save results
output_dir = 'cardiac-model'
data_dir = os.path.join(output_dir, 'data')
result_dir = os.path.join(output_dir, 'results')
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
# load data
df = pd.read_csv(os.path.join(data_dir, 'data.csv'), index_col=0)
var_names = [name.split(' ')[0] for name in df.columns]
x = df.values.astype('float32')
reps = 10
x = np.tile(x.T, reps=reps).T
# # check
# plt.figure()
# plt.plot(x[:500, 0], x[:500, 1])
# plt.show()
# # scale data
# scaler = StandardScaler()
# scaler = scaler.fit(x)
# x = scaler.transform(x)
# create sample lists
samples = []
labels = []
window_size = 1000
for batch in range(x.shape[0] - 2 * window_size):
print(f"{batch} - {batch + window_size - 2} -> {batch + window_size - 1} - {batch + 2 * window_size - 3}")
samples.append(x[batch:batch + window_size - 2])
labels.append(x[batch + window_size - 1:batch + 2 * window_size - 3])
samples = np.array(samples)
labels = np.array(labels)
# create CV splits
skf = KFold(n_splits=5, shuffle=True)
trainval_index, test_index = [split for split in skf.split(samples)][0]
skf2 = KFold(n_splits=5, shuffle=True)
train_index, val_index = [split for split in skf2.split(np.arange(trainval_index.size))][0]
x_train, x_val = samples[trainval_index[train_index]], samples[trainval_index[val_index]]
y_train, y_val = labels[trainval_index[train_index]], labels[trainval_index[val_index]]
x_test, y_test = samples[test_index], labels[test_index]
# create edge list
edge_list = []
for i in range(df.shape[1]):
for j in range(df.shape[1]):
edge_list.append((i, j))
# instantiate a digital patient model
G = dgl.DGLGraph(edge_list)
dp = digital_patient.DigitalPatient(G, epochs=20, lr=0.01, window_size=window_size-2)
# # plot the graph corresponding to the digital patient
# nx_G = dp.G.to_networkx()
# pos = nx.circular_layout(nx_G)
# node_labels = {}
# for i, cn in enumerate(var_names):
# node_labels[i] = cn
# plt.figure()
# nx.draw(nx_G, pos, alpha=0.3)
# nx.draw_networkx_labels(nx_G, pos, labels=node_labels)
# plt.tight_layout()
# plt.savefig(f'{result_dir}/graph.png')
# plt.show()
# instantiate the model, train and predict
dp.fit(x_train, y_train)
predictions = dp.predict(x_test)
# plot the results
sns.set_style('whitegrid')
for i, name in enumerate(var_names):
for j in range(predictions.shape[0]):
xi = y_test[j, :, i]
pi = predictions[j, :, i]
if name == 't':
continue
ti = labels[0, :, 0]
# tik = np.repeat(ti, pi.shape[0])
plt.figure()
plt.plot(ti, xi, label='true')
for pik in pi:
plt.plot(ti, pik, c='r', alpha=0.2)
# sns.lineplot(tik, pik, alpha=0.2, ci=0.9)
# plt.fill_between(ti, pi[:, 0], pi[:, 1], alpha=0.2, label='predicted')
plt.title(name)
plt.legend()
# plt.ylabel(ylabel)
plt.xlabel('time')
plt.tight_layout()
plt.savefig(f'{result_dir}/{name}_{j}.png')
plt.show()
break
return
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import math
from typing import List


class Solution:
def minSumOfLengths(self, arr: List[int], target: int) -> int:
# need to know all subs
n = len(arr)
left = [math.inf] * n
seen = {0 : -1}
cur = 0
for i, val in enumerate(arr):
cur += val
if i > 0:
left[i] = left[i - 1]
if cur - target in seen:
left[i] = min(left[i], i - seen[cur - target])
seen[cur] = i
ans = math.inf
cur = 0
seen = {0 : n}
old = math.inf
for i in reversed(range(n)):
cur += arr[i]
best = old
if cur - target in seen:
best = min(best, seen[cur - target] - i)
            if i > 0 and left[i - 1] != math.inf:
ans = min(ans, left[i - 1] + best)
seen[cur] = i
old = best
return ans if ans != math.inf else -1
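# Quick sanity checks (standard examples for this problem; illustrative only):
#   Solution().minSumOfLengths([3, 2, 2, 4, 3], 3)        -> 2   ([3] and [3])
#   Solution().minSumOfLengths([7, 3, 4, 7], 7)           -> 2   ([7] and [7])
#   Solution().minSumOfLengths([4, 3, 2, 6, 2, 3, 4], 6)  -> -1  (only one sub-array sums to 6)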
|
nilq/baby-python
|
python
|
# te18/leaderboard
# https://github.com/te18/leaderboard
from flask import Flask, render_template
app = Flask(__name__)
# error handlers
@app.errorhandler(400)
def error_400(e):
return render_template("errors/400.html"), 400
@app.errorhandler(404)
def error_404(e):
    return render_template("errors/404.html"), 404
@app.errorhandler(500)
def error_500(e):
return render_template("errors/500.html"), 500
# main routes
@app.route("/")
def index():
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""The Mozilla Firefox history event formatter."""
from __future__ import unicode_literals
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class FirefoxBookmarkAnnotationFormatter(interface.ConditionalEventFormatter):
"""The Firefox bookmark annotation event formatter."""
DATA_TYPE = 'firefox:places:bookmark_annotation'
FORMAT_STRING_PIECES = [
'Bookmark Annotation: [{content}]',
'to bookmark [{title}]',
'({url})']
FORMAT_STRING_SHORT_PIECES = ['Bookmark Annotation: {title}']
SOURCE_LONG = 'Firefox History'
SOURCE_SHORT = 'WEBHIST'
class FirefoxBookmarkFolderFormatter(interface.EventFormatter):
"""The Firefox bookmark folder event formatter."""
DATA_TYPE = 'firefox:places:bookmark_folder'
FORMAT_STRING = '{title}'
SOURCE_LONG = 'Firefox History'
SOURCE_SHORT = 'WEBHIST'
class FirefoxBookmarkFormatter(interface.ConditionalEventFormatter):
"""The Firefox URL bookmark event formatter."""
DATA_TYPE = 'firefox:places:bookmark'
FORMAT_STRING_PIECES = [
'Bookmark {type}',
'{title}',
'({url})',
'[{places_title}]',
'visit count {visit_count}']
FORMAT_STRING_SHORT_PIECES = [
'Bookmarked {title}',
'({url})']
SOURCE_LONG = 'Firefox History'
SOURCE_SHORT = 'WEBHIST'
class FirefoxPageVisitFormatter(interface.ConditionalEventFormatter):
"""The Firefox page visited event formatter."""
DATA_TYPE = 'firefox:places:page_visited'
# Transitions defined in the source file:
# src/toolkit/components/places/nsINavHistoryService.idl
# Also contains further explanation into what each of these settings mean.
_URL_TRANSITIONS = {
1: 'LINK',
2: 'TYPED',
3: 'BOOKMARK',
4: 'EMBED',
5: 'REDIRECT_PERMANENT',
6: 'REDIRECT_TEMPORARY',
7: 'DOWNLOAD',
8: 'FRAMED_LINK',
}
# Map an unrecognized (zero) visit type to an explicit label.
_URL_TRANSITIONS.setdefault(0, 'UNKNOWN')
# TODO: Make extra conditional formatting.
FORMAT_STRING_PIECES = [
'{url}',
'({title})',
'[count: {visit_count}]',
'Host: {host}',
'{extra_string}']
FORMAT_STRING_SHORT_PIECES = ['URL: {url}']
SOURCE_LONG = 'Firefox History'
SOURCE_SHORT = 'WEBHIST'
# pylint: disable=unused-argument
def GetMessages(self, formatter_mediator, event_data):
"""Determines the formatted message strings for the event data.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event_data (EventData): event data.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event data cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event_data.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event_data.data_type))
event_values = event_data.CopyToDict()
visit_type = event_values.get('visit_type', 0)
transition = self._URL_TRANSITIONS.get(visit_type, None)
if transition:
transition_str = 'Transition: {0!s}'.format(transition)
extra = event_values.get('extra', None)
if extra:
if transition:
extra.append(transition_str)
event_values['extra_string'] = ' '.join(extra)
elif transition:
event_values['extra_string'] = transition_str
return self._ConditionalFormatMessages(event_values)
class FirefoxDownloadFormatter(interface.EventFormatter):
"""The Firefox download event formatter."""
DATA_TYPE = 'firefox:downloads:download'
FORMAT_STRING = (
'{url} ({full_path}). Received: {received_bytes} bytes '
'out of: {total_bytes} bytes.')
FORMAT_STRING_SHORT = '{full_path} downloaded ({received_bytes} bytes)'
SOURCE_LONG = 'Firefox History'
SOURCE_SHORT = 'WEBHIST'
manager.FormattersManager.RegisterFormatters([
    FirefoxBookmarkAnnotationFormatter, FirefoxBookmarkFolderFormatter,
    FirefoxBookmarkFormatter, FirefoxPageVisitFormatter,
    FirefoxDownloadFormatter])
|
nilq/baby-python
|
python
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import os
import yaml
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def getctype(typename):
flag = False
if "Const[" in typename:
flag = True
typename = typename[len("Const[") : -1]
arraycount = 0
while "List[" in typename:
arraycount += 1
typename = typename[len("List[") : -1]
typename = typename + "*" * arraycount
if flag:
typename = "const " + typename
return typename
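# Illustrative behaviour of getctype on the spec's type syntax:
#   getctype("List[int64_t]")                 -> "int64_t*"
#   getctype("Const[List[List[float64_t]]]")  -> "const float64_t**"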
if __name__ == "__main__":
with open(
os.path.join(CURRENT_DIR, "..", "include", "awkward", "kernels.h"), "w"
) as header:
header.write("// AUTO GENERATED: DO NOT EDIT BY HAND!\n")
header.write(
"// To regenerate file, execute - python dev/generate-kernelheader.py\n\n"
)
header.write(
'#ifndef AWKWARD_KERNELS_H_\n#define AWKWARD_KERNELS_H_\n\n#include "awkward/common.h"\n\nextern "C" {\n'
)
with open(
os.path.join(CURRENT_DIR, "..", "kernel-specification.yml")
) as specfile:
indspec = yaml.safe_load(specfile)["kernels"]
for spec in indspec:
for childfunc in spec["specializations"]:
header.write(" " * 2 + "EXPORT_SYMBOL ERROR\n")
header.write(" " * 2 + childfunc["name"] + "(\n")
for i, arg in enumerate(childfunc["args"]):
header.write(
" " * 4 + getctype(arg["type"]) + " " + arg["name"]
)
if i == (len(childfunc["args"]) - 1):
header.write(");\n")
else:
header.write(",\n")
header.write("\n")
header.write("}\n#endif\n")
|
nilq/baby-python
|
python
|
from enum import Enum
import random
class Color(Enum):
YELLOW = 0
RED = 1
BLUE = 2
GREEN = 3
NONE = -1
class Player(object):
def __init__(self, name, uid):
self.cards = []
self.name = name
self.id = uid
class Card(object):
def __init__(self, color):
self.id = random.randrange(0,100000000000)
self.color = color
class Normal(Card):
def __init__(self, color, digit):
super().__init__(color)
self.digit = digit
self.link = "%s-%d.png" % (Color(self.color).name.lower(), self.digit)
def __repr__(self):
return "%s %d" % (self.color.name, self.digit)
class Pull2(Card):
def __init__(self, color):
super().__init__(color)
self.link = "%s-%s.png" % (Color(self.color).name.lower(), "Pull2" )
def __repr__(self):
return "2 ZIEHEN (%s)" % self.color.name
class LoseTurn(Card):
def __init__(self, color):
super().__init__(color)
self.link = "%s-%s.png" % (Color(self.color).name.lower(), "LooseTurn")
def __repr__(self):
return "AUSSETZEN (%s)" % self.color.name
class Retour(Card):
def __init__(self, color):
super().__init__(color)
self.link = "%s-%s.png" % (Color(self.color).name.lower(), "Retour")
def __repr__(self):
return "RICHTUNGSWECHSEL (%s)" % self.color.name
class ChangeColor(Card):
def __init__(self):
super().__init__(Color.NONE)
self.link = "ChangeColor.png"
def __repr__(self):
return "Wünscher: %s" % self.color.name
class Pull4(Card):
def __init__(self):
super().__init__(Color.NONE)
self.link = "Pull4.png"
def __repr__(self):
return "4 ZIEHEN! und %s " % self.color.name
|
nilq/baby-python
|
python
|
import os
from twisted.application import service
from twisted.python.filepath import FilePath
from buildslave.bot import BuildSlave
basedir = '.'
rotateLength = 10000000
maxRotatedFiles = 10
# if this is a relocatable tac file, get the directory containing the TAC
if basedir == '.':
import os.path
basedir = os.path.abspath(os.path.dirname(__file__))
# note: this line is matched against to check that this is a buildslave
# directory; do not edit it.
application = service.Application('buildslave')
try:
from twisted.python.logfile import LogFile
from twisted.python.log import ILogObserver, FileLogObserver
logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength,
maxRotatedFiles=maxRotatedFiles)
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
except ImportError:
# probably not yet twisted 8.2.0 and beyond, can't set log yet
pass
buildmaster_host = '{{host}}'
port = {{port}}
slavename = '{{name}}'
passwd = '{{password}}'
keepalive = 600
usepty = False
umask = 0o022
maxdelay = 300
s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir,
keepalive, usepty, umask=umask, maxdelay=maxdelay,
allow_shutdown=False)
s.setServiceParent(application)
|
nilq/baby-python
|
python
|
class LightCommand(object):
pass
|
nilq/baby-python
|
python
|
"""Package for all views."""
from .control import Control
from .dashboard import Dashboard
from .events import Events
from .live import Live
from .liveness import Ping, Ready
from .login import Login
from .logout import Logout
from .main import Main
from .resultat import Resultat, ResultatHeat
from .start import Start
from .timing import Timing
|
nilq/baby-python
|
python
|
"""MAGI Validators."""
|
nilq/baby-python
|
python
|
# Author: Nathan Trouvain at 16/08/2021 <nathan.trouvain@inria.fr>
# Licence: MIT License
# Copyright: Xavier Hinaut (2018) <xavier.hinaut@inria.fr>
from functools import partial
import numpy as np
from scipy import linalg
from .utils import (readout_forward, _initialize_readout,
_prepare_inputs_for_learning)
from ..base.node import Node
from ..base.types import global_dtype
def _solve_ridge(XXT, YXT, ridge):
return linalg.solve(XXT + ridge, YXT.T, assume_a="sym")
def partial_backward(readout: Node, X_batch, Y_batch=None):
transient = readout.transient
X, Y = _prepare_inputs_for_learning(X_batch, Y_batch,
transient=transient,
bias=readout.input_bias,
allow_reshape=True)
xxt = X.T.dot(X)
yxt = Y.T.dot(X)
XXT = readout.get_buffer("XXT")
YXT = readout.get_buffer("YXT")
# This is not thread-safe, apparently, using Numpy memmap as buffers
# ok for parallelization then with a lock (see ESN object)
XXT += xxt
YXT += yxt
def backward(readout: Node, X=None, Y=None):
ridge = readout.ridge
XXT = readout.get_buffer("XXT")
YXT = readout.get_buffer("YXT")
input_dim = readout.input_dim
if readout.input_bias:
input_dim += 1
ridgeid = (ridge * np.eye(input_dim, dtype=global_dtype))
Wout_raw = _solve_ridge(XXT, YXT, ridgeid)
if readout.input_bias:
Wout, bias = Wout_raw[1:, :], Wout_raw[0, :][np.newaxis, :]
readout.set_param("Wout", Wout)
readout.set_param("bias", bias)
else:
readout.set_param("Wout", Wout_raw)
def initialize(readout: Node,
x=None,
y=None,
Wout_init=None):
_initialize_readout(readout, x, y, bias=readout.input_bias,
init_func=Wout_init)
def initialize_buffers(readout):
# create memmaped buffers for matrices X.X^T and Y.X^T pre-computed
# in parallel for ridge regression
# ! only memmap can be used ! Impossible to share Numpy arrays with
# different processes in r/w mode otherwise (with proper locking)
input_dim = readout.input_dim
output_dim = readout.output_dim
if readout.input_bias:
input_dim += 1
readout.create_buffer("XXT", (input_dim,
input_dim))
readout.create_buffer("YXT", (output_dim,
input_dim))
class Ridge(Node):
def __init__(self, output_dim=None, ridge=0.0, transient=0, Wout=None,
input_bias=True, name=None):
super(Ridge, self).__init__(params={"Wout": None, "bias": None},
hypers={"ridge": ridge,
"transient": transient,
"input_bias": input_bias},
forward=readout_forward,
partial_backward=partial_backward,
backward=backward,
output_dim=output_dim,
initializer=partial(initialize,
Wout_init=Wout),
buffers_initializer=initialize_buffers,
name=name)
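# The backward() pass above solves the ridge (Tikhonov) regression in closed
# form from the accumulated buffers: Wout = solve(X^T X + ridge*I, X^T Y).
# A standalone NumPy equivalent (illustrative sketch with random data):
#   import numpy as np
#   from scipy import linalg
#   X = np.random.normal(size=(100, 20))
#   Y = np.random.normal(size=(100, 3))
#   Wout = linalg.solve(X.T @ X + 1e-6 * np.eye(20), X.T @ Y, assume_a="sym")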
|
nilq/baby-python
|
python
|
"""
This playbook checks for the presence of the Risk Response workbook and updates tasks or leaves generic notes. "Risk_notable_verdict" recommends this playbook as a second phase of the investigation. Additionally, this playbook can be used in ad-hoc investigations or incorporated into custom workbooks.
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
def on_start(container):
phantom.debug('on_start() called')
# call 'workbook_list' block
workbook_list(container=container)
return
def workbook_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("workbook_list() called")
parameters = [{}]
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_list", parameters=parameters, name="workbook_list", callback=workbook_decision)
return
def workbook_decision(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("workbook_decision() called")
################################################################################
# Determines if the workbook Risk Response is present and available for use.
################################################################################
# check for 'if' condition 1
found_match_1 = phantom.decision(
container=container,
conditions=[
["workbook_list:custom_function_result.data.*.name", "==", "Risk Response"]
])
# call connected blocks if condition 1 matched
if found_match_1:
workbook_add(action=action, success=success, container=container, results=results, handle=handle)
return
# check for 'else' condition 2
join_risk_notable_review_indicators(action=action, success=success, container=container, results=results, handle=handle)
return
def workbook_add(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("workbook_add() called")
id_value = container.get("id", None)
parameters = []
parameters.append({
"workbook": "Risk Response",
"container": id_value,
"start_workbook": "true",
"check_for_existing_workbook": "true",
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_add", parameters=parameters, name="workbook_add", callback=workbook_start_task)
return
def workbook_start_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("workbook_start_task() called")
id_value = container.get("id", None)
parameters = []
parameters.append({
"owner": None,
"status": "in_progress",
"container": id_value,
"task_name": "Block Indicators",
"note_title": None,
"note_content": None,
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="workbook_start_task", callback=join_risk_notable_review_indicators)
return
def join_risk_notable_review_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("join_risk_notable_review_indicators() called")
# if the joined function has already been called, do nothing
if phantom.get_run_data(key="join_risk_notable_review_indicators_called"):
return
# save the state that the joined function has now been called
phantom.save_run_data(key="join_risk_notable_review_indicators_called", value="risk_notable_review_indicators")
# call connected block "risk_notable_review_indicators"
risk_notable_review_indicators(container=container, handle=handle)
return
def risk_notable_review_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("risk_notable_review_indicators() called")
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
# call playbook "community/risk_notable_review_indicators", returns the playbook_run_id
playbook_run_id = phantom.playbook("community/risk_notable_review_indicators", container=container, name="risk_notable_review_indicators", callback=indicator_get_by_tag)
return
def risk_notable_block_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("risk_notable_block_indicators() called")
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
# call playbook "community/risk_notable_block_indicators", returns the playbook_run_id
playbook_run_id = phantom.playbook("community/risk_notable_block_indicators", container=container, name="risk_notable_block_indicators", callback=note_decision_1)
return
def join_risk_notable_protect_assets_and_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("join_risk_notable_protect_assets_and_users() called")
# if the joined function has already been called, do nothing
if phantom.get_run_data(key="join_risk_notable_protect_assets_and_users_called"):
return
# save the state that the joined function has now been called
phantom.save_run_data(key="join_risk_notable_protect_assets_and_users_called", value="risk_notable_protect_assets_and_users")
# call connected block "risk_notable_protect_assets_and_users"
risk_notable_protect_assets_and_users(container=container, handle=handle)
return
def risk_notable_protect_assets_and_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("risk_notable_protect_assets_and_users() called")
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
# call playbook "community/risk_notable_protect_assets_and_users", returns the playbook_run_id
playbook_run_id = phantom.playbook("community/risk_notable_protect_assets_and_users", container=container, name="risk_notable_protect_assets_and_users", callback=note_decision_2)
return
def note_decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("note_decision_1() called")
################################################################################
# Determine if a note was left by the previous playbook and if the Risk Mitigate
# workbook should be used.
################################################################################
# check for 'if' condition 1
found_match_1 = phantom.decision(
container=container,
logical_operator="and",
conditions=[
["risk_notable_block_indicators:playbook_output:note_title", "!=", ""],
["risk_notable_block_indicators:playbook_output:note_content", "!=", ""],
["workbook_list:custom_function_result.data.*.name", "==", "Risk Mitigate"]
])
# call connected blocks if condition 1 matched
if found_match_1:
update_block_task(action=action, success=success, container=container, results=results, handle=handle)
return
# check for 'elif' condition 2
found_match_2 = phantom.decision(
container=container,
logical_operator="and",
conditions=[
["risk_notable_block_indicators:playbook_output:note_title", "!=", ""],
["risk_notable_block_indicators:playbook_output:note_content", "!=", ""]
])
# call connected blocks if condition 2 matched
if found_match_2:
add_block_note(action=action, success=success, container=container, results=results, handle=handle)
return
return
def update_block_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("update_block_task() called")
id_value = container.get("id", None)
risk_notable_block_indicators_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_title"])
risk_notable_block_indicators_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_content"])
parameters = []
# build parameters list for 'update_block_task' call
for risk_notable_block_indicators_output_note_title_item in risk_notable_block_indicators_output_note_title:
for risk_notable_block_indicators_output_note_content_item in risk_notable_block_indicators_output_note_content:
parameters.append({
"owner": None,
"status": "closed",
"container": id_value,
"task_name": "Review and Block Indicators",
"note_title": risk_notable_block_indicators_output_note_title_item[0],
"note_content": risk_notable_block_indicators_output_note_content_item[0],
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="update_block_task", callback=start_protect_task)
return
def start_protect_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("start_protect_task() called")
id_value = container.get("id", None)
parameters = []
parameters.append({
"owner": None,
"status": "in_progress",
"container": id_value,
"task_name": "Protect Assets and Users",
"note_title": None,
"note_content": None,
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="start_protect_task", callback=join_risk_notable_protect_assets_and_users)
return
def add_block_note(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("add_block_note() called")
################################################################################
# Custom code to handle leaving a note with a dynamic title and content when the
# Risk Mitigate workbook is not present.
################################################################################
risk_notable_block_indicators_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_title"])
risk_notable_block_indicators_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_content"])
risk_notable_block_indicators_output_note_title_values = [item[0] for item in risk_notable_block_indicators_output_note_title]
risk_notable_block_indicators_output_note_content_values = [item[0] for item in risk_notable_block_indicators_output_note_content]
################################################################################
## Custom Code Start
################################################################################
note_title = risk_notable_block_indicators_output_note_title_values
note_content = risk_notable_block_indicators_output_note_content_values
for title, content in zip(note_title, note_content):
phantom.add_note(container=container, title=title, content=content, note_type="general", note_format="markdown")
################################################################################
## Custom Code End
################################################################################
join_risk_notable_protect_assets_and_users(container=container)
return
def note_decision_2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("note_decision_2() called")
################################################################################
# Determine if a note was left by the previous playbook and if the Risk Mitigate
# workbook should be used.
################################################################################
# check for 'if' condition 1
found_match_1 = phantom.decision(
container=container,
logical_operator="and",
conditions=[
["risk_notable_protect_assets_and_users:playbook_output:note_title", "!=", ""],
["risk_notable_protect_assets_and_users:playbook_output:note_content", "!=", ""],
["workbook_list:custom_function_result.data.*.name", "==", "Risk Mitigate"]
])
# call connected blocks if condition 1 matched
if found_match_1:
update_protect_task(action=action, success=success, container=container, results=results, handle=handle)
return
# check for 'elif' condition 2
found_match_2 = phantom.decision(
container=container,
logical_operator="and",
conditions=[
["risk_notable_protect_assets_and_users:playbook_output:note_title", "!=", ""],
["risk_notable_protect_assets_and_users:playbook_output:note_content", "!=", ""]
])
# call connected blocks if condition 2 matched
if found_match_2:
add_protect_note(action=action, success=success, container=container, results=results, handle=handle)
return
return
def update_protect_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("update_protect_task() called")
id_value = container.get("id", None)
risk_notable_protect_assets_and_users_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_title"])
risk_notable_protect_assets_and_users_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_content"])
parameters = []
# build parameters list for 'update_protect_task' call
for risk_notable_protect_assets_and_users_output_note_title_item in risk_notable_protect_assets_and_users_output_note_title:
for risk_notable_protect_assets_and_users_output_note_content_item in risk_notable_protect_assets_and_users_output_note_content:
parameters.append({
"owner": None,
"status": "complete",
"container": id_value,
"task_name": "Protect Assets and Users",
"note_title": risk_notable_protect_assets_and_users_output_note_title_item[0],
"note_content": risk_notable_protect_assets_and_users_output_note_content_item[0],
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="update_protect_task")
return
def add_protect_note(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("add_protect_note() called")
################################################################################
# Custom code to handle leaving a note with a dynamic title and content when the
# Risk Mitigate workbook is not present.
################################################################################
risk_notable_protect_assets_and_users_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_title"])
risk_notable_protect_assets_and_users_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_content"])
risk_notable_protect_assets_and_users_output_note_title_values = [item[0] for item in risk_notable_protect_assets_and_users_output_note_title]
risk_notable_protect_assets_and_users_output_note_content_values = [item[0] for item in risk_notable_protect_assets_and_users_output_note_content]
################################################################################
## Custom Code Start
################################################################################
note_title = risk_notable_protect_assets_and_users_output_note_title_values
note_content = risk_notable_protect_assets_and_users_output_note_content_values
for title, content in zip(note_title, note_content):
phantom.add_note(container=container, title=title, content=content, note_type="general", note_format="markdown")
################################################################################
## Custom Code End
################################################################################
return
def decision_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("decision_4() called")
# check for 'if' condition 1
found_match_1 = phantom.decision(
container=container,
conditions=[
["indicator_get_by_tag:custom_function_result.data.*.indicator_value", "!=", ""]
])
# call connected blocks if condition 1 matched
if found_match_1:
risk_notable_block_indicators(action=action, success=success, container=container, results=results, handle=handle)
return
# check for 'else' condition 2
join_risk_notable_protect_assets_and_users(action=action, success=success, container=container, results=results, handle=handle)
return
def indicator_get_by_tag(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("indicator_get_by_tag() called")
id_value = container.get("id", None)
parameters = []
parameters.append({
"tags_or": "marked_for_block",
"tags_and": None,
"container": id_value,
"tags_exclude": "blocked, safe",
"indicator_timerange": None,
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.custom_function(custom_function="community/indicator_get_by_tag", parameters=parameters, name="indicator_get_by_tag", callback=decision_4)
return
def on_finish(container, summary):
phantom.debug("on_finish() called")
################################################################################
## Custom Code Start
################################################################################
# This function is called after all actions are completed.
# summary of all the action and/or all details of actions
# can be collected here.
# summary_json = phantom.get_summary()
# if 'result' in summary_json:
# for action_result in summary_json['result']:
# if 'action_run_id' in action_result:
# action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
# phantom.debug(action_results)
################################################################################
## Custom Code End
################################################################################
return
|
nilq/baby-python
|
python
|
from learnware.feature.timeseries.ts_feature import *
import pandas as pd
import numpy as np
class TestTimeSeriesFeature:
def test_ts_feature_stationary_test(self):
df1 = pd.DataFrame(np.random.randint(0, 200, size=(100, 1)), columns=['x'])
df2 = pd.util.testing.makeTimeDataFrame(50)
df3 = pd.DataFrame([1, 2, 3, 2, 3, 1, 1, 1, 1, 5, 5, 5, 8, 9, 9, 10, 11, 12], columns=['x'])
assert time_series_stationary_test(df1['x'])
assert time_series_stationary_test(df2['A'])
        assert not time_series_stationary_test(df3['x'])
def test_ts_feature_seasonal_decompose(self):
df = pd.DataFrame(np.random.randint(1, 10, size=(365, 1)), columns=['value'],
index=pd.date_range('2021-01-01', periods=365, freq='D'))
ret = time_series_seasonal_decompose(df['value'])
assert "seasonal" in ret and len(ret["seasonal"]) == len(df)
assert "resid" in ret and len(ret["resid"]) == len(df)
assert "trend" in ret and len(ret["trend"]) == len(df)
def test_ts_feature_get_seasonal_value(self):
df = pd.DataFrame(np.random.randint(1, 10, size=(365, 1)), columns=['value'],
index=pd.date_range('2021-01-01', periods=365, freq='D'))
ret = time_series_seasonal_test(df['value'], [1, 30, 60, 120])
assert (type(ret) is list and len(ret) == 4)
|
nilq/baby-python
|
python
|
"""
datos de entrada
A -->int -->a
B -->int -->b
C -->int -->c
D --> int --> d
datos de salida
"""
#entradas
a = int ( input ( "digite el valor de A:" ))
c = int ( input ( "digite el valor de B:" ))
b = int ( input ( "digite el valor de C:" ))
d = int ( input ( "digite el valor de D:" ))
#cajanegra
resultado = ""
si ( c > 5 ):
c = 0
re = 0
segundo = segundo + 1
elif ( b == 9 ):
segundo = 1
elif ( c < 5 ):
c = 0
re = 0
elif ( c == 5 ):
re = 0
print ( "su numero redondeado es" , str ( a ) + str ( b ) + str ( c ) + str ( d ))
#salida
|
nilq/baby-python
|
python
|
from kivy.logger import Logger
from kivy.clock import mainthread
from jnius import autoclass
from android.activity import bind as result_bind
Gso = autoclass("com.google.android.gms.auth.api.signin.GoogleSignInOptions")
GsoBuilder = autoclass(
"com.google.android.gms.auth.api.signin.GoogleSignInOptions$Builder"
)
GSignIn = autoclass("com.google.android.gms.auth.api.signin.GoogleSignIn")
ApiException = autoclass("com.google.android.gms.common.api.ApiException")
PythonActivity = autoclass("org.kivy.android.PythonActivity")
context = PythonActivity.mActivity
RC_SIGN_IN = 10122
mGSignInClient = None
class GoogleActivityListener:
def __init__(self, success_listener, error_listener):
self.success_listener = success_listener
self.error_listener = error_listener
def google_activity_listener(self, request_code, result_code, data):
if request_code == RC_SIGN_IN:
Logger.info("KivyAuth: google_activity_listener called.")
task = GSignIn.getSignedInAccountFromIntent(data)
try:
account = task.getResult(ApiException)
if account:
Logger.info(
"KivyAuth: Google Login success.\
Calling success listener."
)
self.success_listener(
account.getDisplayName(),
account.getEmail(),
account.getPhotoUrl().toString(),
)
except Exception as e:
Logger.info(
"KivyAuth: Error signing in using Google. {}".format(e)
)
self.error_listener()
def initialize_google(success_listener, error_listener):
gso = GsoBuilder(Gso.DEFAULT_SIGN_IN).requestEmail().build()
global mGSignInClient
mGSignInClient = GSignIn.getClient(context, gso)
gal = GoogleActivityListener(success_listener, error_listener)
result_bind(on_activity_result=gal.google_activity_listener)
Logger.info("KivyAuth: Initialized google signin")
# @mainthread
def login_google():
Logger.info("KivyAuth: Initiated google login")
signInIntent = mGSignInClient.getSignInIntent()
context.startActivityForResult(signInIntent, RC_SIGN_IN)
def logout_google(after_logout):
mGSignInClient.signOut()
after_logout()
Logger.info("KivyAuth: Logged out from google login")
|
nilq/baby-python
|
python
|
import numpy as np
from random import choices
import matplotlib.pyplot as plt
def Kroupa(N):
'''
Calculates N stellar masses drawing from a Kroupa IMF 0.08 < m < 130
Input >>> N = number of stars wanted
Output >>> masses = N-sized array of stellar masses
'''
# Create a list of potential masses and then calculate their weights by using Kroupa IMF
potential_mass = np.logspace(np.log10(0.08), np.log10(130), 10**4, endpoint=True)
weights_low = 0.204*potential_mass[np.where(potential_mass<0.5)]**(-1.3) # Probabilities below m=0.5Msol
weights_high = 0.204*potential_mass[np.where(potential_mass>=0.5)]**(-2.3) # Probabilities above m=0.5M_sol
weights_total = np.append(weights_low, weights_high)
# Picking the final masses based on the weights
masses = choices(potential_mass, weights_total,k=N)
return masses
masses = Kroupa(1000)
fig, ax = plt.subplots()
ax.hist(masses, bins=50, density =True, histtype='step')
plt.show()
|
nilq/baby-python
|
python
|
import sys
from utils import write_exp_utils
import pandas as pd
from utils import misc_utils
import psycopg2
from psycopg2.extras import Json, DictCursor
def main(argv):
print(argv[1])
w = write_exp_utils.ExperimentConfig(argv[1], argv[2])
print("writing {} to database".format(argv[1]) )
w.write_to_db()# write experiment on database
# check if the experiment is written correctly
q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;'
conn = misc_utils.connect_rds()
print(pd.read_sql(q, conn))
if __name__ == '__main__':
main(sys.argv)
|
nilq/baby-python
|
python
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import sys
from gala import imio, classify, features, morpho, agglo, evaluate as ev
from scipy.ndimage import label
from skimage.morphology import dilation, erosion
from skimage.morphology import square, disk
import argparse
from skimage import morphology as skmorph
import pickle
def get_parser():
parser = argparse.ArgumentParser(description='GALA neuron Aggolmeration script')
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument(
'-m',
'--mode',
required=True,
help='Train(0) or Deploy(1)')
parser.add_argument(
'--prob_file',
required=True,
help='Probability map file')
parser.add_argument(
'--gt_file',
required=False,
help='Ground truth file')
parser.add_argument(
'--ws_file',
required=False,
help='Watershed file')
parser.add_argument(
'--train_file',
required=False,
help='Pretrained classifier file')
parser.add_argument(
'-o',
'--outfile',
required=True,
help='Output file')
parser.add_argument('--seeds_cc_threshold', type=int, default=5,
help='Cutoff threshold on seed size')
parser.add_argument('--agg_threshold', type=float, default=0.5,
help='Cutoff threshold for agglomeration classifier')
return parser
def train(args):
gt_train, pr_train, ws_train = (map(imio.read_h5_stack,
[args.gt_file, args.prob_file,
args.ws_file]))
#['train-gt.lzf.h5', 'train-p1.lzf.h5',
# 'train-ws.lzf.h5']))
#print('training')
#gt_train = np.load(args.gt_file) #X,Y,Z
#gt_train = np.transpose(gt_train,(2,0,1)) #gala wants z,x,y?
#pr_train = np.load(args.prob_file) #X,Y,Z
#pr_train = np.transpose(np.squeeze(pr_train),(2,0,1)) #gala wants z,x,y?
#pr_train = pr_train[0:50,0:256,0:256]
#pr_train = np.around(pr_train,decimals=2)
#gt_train = gt_train[0:50,0:256,0:256]
#print('watershed')
#seeds = label(pr_train==0)[0]
#seeds_cc_threshold = args.seeds_cc_threshold
#seeds = morpho.remove_small_connected_components(seeds,
# seeds_cc_threshold)
#ws_train = skmorph.watershed(pr_train, seeds)
fm = features.moments.Manager()
fh = features.histogram.Manager()
fc = features.base.Composite(children=[fm, fh])
g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
(X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0]
y = y[:, 0] # gala has 3 truth labeling schemes, pick the first one
rf = classify.DefaultRandomForest().fit(X, y)
learned_policy = agglo.classifier_probability(fc, rf)
#save learned_policy
#np.savez(args.outfile, rf=rf, fc=fc)
binary_file = open(args.outfile,mode='wb')
lp_dump = pickle.dump([fc,rf], binary_file)
binary_file.close()
def deploy(args):
#probability map
print("Deploying through driver")
if args.prob_file.endswith('.hdf5'):
mem = imio.read_image_stack(args.prob_file, single_channel=False)
else:
mem = np.load(args.prob_file) #X,Y,Z
mem = np.transpose(np.squeeze(mem),(2,0,1)) #gala wants z,x,y?
pr_test = np.zeros_like(mem)
    for z in range(0, mem.shape[0]):
        # morphological closing per slice: dilate, then erode the dilated result
        pr_test[z, :, :] = dilation(mem[z, :, :], disk(10))
        pr_test[z, :, :] = erosion(pr_test[z, :, :], disk(4))
seg_out = np.zeros(pr_test.shape)
pr_dim = pr_test.shape
xsize = pr_dim[1]
ysize = pr_dim[2]
zsize = pr_dim[0]
    print(pr_dim)
    print(pr_dim[0])
    print(int(pr_dim[0] / zsize))
    print("Starting loop")
    for iz in range(0, int(pr_dim[0] / zsize)):
        for ix in range(0, int(pr_dim[1] / xsize)):
            for iy in range(0, int(pr_dim[2] / ysize)):
p0 = pr_test[iz*zsize+0:iz*zsize+zsize,ix*xsize+0:ix*xsize+xsize,iy*ysize+0:iy*ysize+ysize]
p0 = np.around(p0,decimals=2)
print(p0)
#get trained classifier
#npzfile = np.load(args.train_file)
#rf = npzfile['rf']
#fc = npzfile['fc']
binary_file = open(args.train_file,mode='rb')
print(binary_file)
temp = pickle.load(binary_file)
fc = temp[0]
rf = temp[1]
binary_file.close()
learned_policy = agglo.classifier_probability(fc, rf)
#pr_test = (map(imio.read_h5_stack,
# ['test-p1.lzf.h5']))
print('watershed')
seeds = label(p0==0)[0]
seeds_cc_threshold = args.seeds_cc_threshold
seeds = morpho.remove_small_connected_components(seeds,
seeds_cc_threshold)
ws_test = skmorph.watershed(p0, seeds)
g_test = agglo.Rag(ws_test, p0, learned_policy, feature_manager=fc)
g_test.agglomerate(args.agg_threshold)
#This is a map of labels of the same shape as the original image.
seg_test1 = g_test.get_segmentation()
seg_out[iz*zsize+0:iz*zsize+zsize,ix*xsize+0:ix*xsize+xsize,iy*ysize+0:iy*ysize+ysize] = seg_test1
seg_out = np.transpose(seg_out,(1,2,0))
with open(args.outfile, 'wb') as f:
np.save(f,seg_out)
return
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
if(int(args.mode)==0):
train(args)
else:
deploy(args)
|
nilq/baby-python
|
python
|
from __future__ import print_function
from timeit import default_timer as timer
import json
import datetime
print('Loading function')
def eratosthenes(n):
sieve = [ True for i in range(n+1) ]
def markOff(pv):
for i in range(pv+pv, n+1, pv):
sieve[i] = False
markOff(2)
for i in range(3, n+1):
if sieve[i]:
markOff(i)
return [ i for i in range(1, n+1) if sieve[i] ]
def lambda_handler(event, context):
start = timer()
#print("Received event: " + json.dumps(event, indent=2))
maxPrime = int(event['queryStringParameters']['max'])
numLoops = int(event['queryStringParameters']['loops'])
print("looping " + str(numLoops) + " time(s)")
for loop in range (0, numLoops):
primes = eratosthenes(maxPrime)
print("Highest 3 primes: " + str(primes.pop()) + ", " + str(primes.pop()) + ", " + str(primes.pop()))
durationSeconds = timer() - start
return {"statusCode": 200, \
"headers": {"Content-Type": "application/json"}, \
"body": "{\"durationSeconds\": " + str(durationSeconds) + \
", \"max\": " + str(maxPrime) + ", \"loops\": " + str(numLoops) + "}"}
|
nilq/baby-python
|
python
|
"""
--- Day 1: The Tyranny of the Rocket Equation ---
https://adventofcode.com/2019/day/1
"""
class FuelCounterUpper:
"""Determines the amount of fuel required to launch"""
@classmethod
def calc_fuel_req(cls, mass: int) -> int:
"""calc fuel required for moving input mass
Don't forget to account for the weight of the fuel, too!
Returns:
int -- fuel required
"""
fuel_need = max(int(mass / 3) - 2, 0)
if fuel_need == 0:
return 0
return fuel_need + cls.calc_fuel_req(fuel_need)
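# Worked example from the puzzle text: a module of mass 1969 needs 654 fuel,
# that fuel needs 216 more, then 70, 21, 5, and finally 0, so
# FuelCounterUpper.calc_fuel_req(1969) == 654 + 216 + 70 + 21 + 5 == 966.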
if __name__ == "__main__":
fcu = FuelCounterUpper()
with open("inputs/day01") as f:
masses = f.readlines()
total_fuel = sum([fcu.calc_fuel_req(int(m)) for m in masses])
print(f"total fuel required = { total_fuel }")
|
nilq/baby-python
|
python
|
from pyleap import *
bg = Rectangle(0, 0, window.width, window.height, color="white")
r = Rectangle(color=(125, 125, 0))
line1 = Line(100, 200, 300, 400, 15, 'pink')
tri = Triangle(200, 100, 300, 100, 250, 150, "green")
c2 = Circle(200, 200, 50, "#ffff00")
c = Circle(200, 200, 100, "red")
txt = Text('Hello, world')
c.transform.scale_y = 0.5
c2.opacity = 0.5
def update(dt):
r.x += 1
r.y += 1
c.x += 1
line1.transform.rotation += 1
c.transform.rotation -= 1
def draw(dt):
# update()
window.clear()
bg.draw()
window.show_axis()
Rectangle(100, 100, 50, 25, 'pink').draw()
r.stroke()
line1.draw()
tri.stroke()
c.stroke()
c2.draw()
txt.draw()
window.show_fps()
def start_move():
repeat(update)
def stop_move():
stop(update)
mouse.on_press(start_move)
mouse.on_release(stop_move)
repeat(draw)
run()
|
nilq/baby-python
|
python
|
# ======================================================================
# Timing is Everything
# Advent of Code 2016 Day 15 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# Tests from
# https://rosettacode.org/wiki/Chinese_remainder_theorem#Functional
# https://www.reddit.com/r/adventofcode/comments/5ifn4v/2016_day_15_solutions/
# ======================================================================
# ======================================================================
# t e s t _ c r t . p y
# ======================================================================
"Test Cmt for Advent of Code 2016 day 15, Timing is Everything"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import unittest
import crt
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ======================================================================
# TestCRT
# ======================================================================
class TestCRT(unittest.TestCase): # pylint: disable=R0904
"Test CRT object"
def test_rosetta_code_examples(self):
"Test examples from rosettacode"
self.assertEqual(crt.chinese_remainder([3, 5, 7], [2, 3, 2]), 23)
self.assertEqual(crt.chinese_remainder([5, 13], [2, 3]), 42)
self.assertEqual(crt.chinese_remainder([100, 23], [19, 0]), 1219)
self.assertEqual(crt.chinese_remainder([11, 12, 13], [10, 4, 12]), 1000)
self.assertEqual(crt.chinese_remainder([5, 7, 9, 11], [1, 2, 3, 4]), 1731)
self.assertEqual(crt.chinese_remainder(
[17353461355013928499, 3882485124428619605195281, 13563122655762143587],
[7631415079307304117, 1248561880341424820456626, 2756437267211517231]),
937307771161836294247413550632295202816)
def test_part_one_example(self):
"Test example from part one description [disc sizes], [initial values]"
self.assertEqual(crt.chinese_remainder([5, 2], [-4, -1 - 1]), 5 + 1)
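# For reference, a standard CRT implementation compatible with the tests
# above (a sketch only; the real crt module lives alongside this file):
#
#   def chinese_remainder(moduli, residues):
#       from functools import reduce
#       prod = reduce(lambda a, b: a * b, moduli)
#       total = 0
#       for n_i, a_i in zip(moduli, residues):
#           p = prod // n_i
#           total += a_i * p * pow(p, -1, n_i)  # modular inverse (Python 3.8+)
#       return total % prod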
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
# ======================================================================
# end t e s t _ c r t . p y end
# ======================================================================
|
nilq/baby-python
|
python
|
import time
import random
import sqlite3
from parsers import OnePageParse
from parsers import SeparatedPageParser
from parsers import adultCollector
from history import History
conn = sqlite3.connect('killmepls.db')
c = conn.cursor()
for row in c.execute("SELECT MAX(hID) FROM stories"):
last_hID = row[0]
print(last_hID)
list_of_histories = []
currentURL = 'https://killpls.me'
baseURL = 'https://killpls.me'
main_page = OnePageParse(currentURL, baseURL)
main_page.startParsing()
historyChecking = main_page.getListOfHistories()
adultCollector(list_of_histories, historyChecking, baseURL)
nextURL = main_page.getNextParsingPage()
counter = 1
while nextURL:
print('Next: {}'.format(nextURL))
currentPage = OnePageParse(nextURL, baseURL)
currentPage.startParsing()
historyChecking = currentPage.getListOfHistories()
adultCollector(list_of_histories, historyChecking, baseURL)
if last_hID in list(map(lambda x : x.historyID, list_of_histories)):
print("We've faced history with ID = {}. Collection of histories stopped.".format(last_hID))
break
delay_sec = random.randint(1,5)
print('Delay : {} seconds'.format(delay_sec))
time.sleep(delay_sec)
print('At iteration: {} we have {} histories'.format(counter, len(list_of_histories)))
nextURL = currentPage.getNextParsingPage()
counter += 1
sqlite_insert_with_param = """INSERT INTO 'stories'
('hID', 'hdate', 'url', 'history', 'tags', 'votes', 'lastAccess', 'adult')
VALUES (?, ?, ?, ?, ?, ?, ?, ?);"""
for one_history in list_of_histories:
data_tuple = (one_history.historyID,
one_history.historyTime,
one_history.historyURL,
one_history.historyText,
' '.join(one_history.historyTags),
one_history.historyVotes,
one_history.lastAccessTime,
one_history.adultFlag)
try:
c.execute(sqlite_insert_with_param, data_tuple)
except sqlite3.IntegrityError:
print("Uniqueness violation: {}\t{}".format(data_tuple[0], data_tuple[2] ))
conn.commit()
conn.close()
|
nilq/baby-python
|
python
|
import math
import sys
import string
sys.path.append("../..")
from MolecularSystem import System
x = System(None)
y = System(None)
z = System(None)
x.load_pdb('1KAW.pdb')
y.load_pdb('1L1OA.pdb')
z.load_pdb('1L1OB.pdb')
for prot in [x,y,z]:
prot.ProteinList[0].fill_pseudo_sidechains(1)
prot.ProteinList[0].fill_neighbors_lists(0.35,15.0)
x.res_list = [8, 14,31,32,33,34,35,57, 58, 59, 71, 77, 78, 79, 109]
y.res_list = [25,31,40,41,42,43,44,53, 54, 55, 63, 67, 68, 69, 84 ]
z.res_list = [74,80,91,92,93,94,95,104,105,106,120,126,127,128,146]
dsf = 0.15
do_replicate = 1
replicate_thresh = 0.05
shell_start = 7.0
shell_end = 13.0
p_lo_hash = {}
p_hi_hash = {}
combinations = 0
beta_dist_sum = 0.0
beta_dist_cnt = 0.0
beta_dist_list = []
p_cnt = -1
for p in [x,y,z]:
p_cnt += 1
for rn1 in range(len(p.res_list)-3):
b1 = p.ProteinList[0].residue_dict[p.res_list[rn1]].central_atom
c1 = p.ProteinList[0].residue_dict[p.res_list[rn1]].pseudo_sidechain
x1,y1,z1 = c1.x,c1.y,c1.z
xb1,yb1,zb1 = b1.x,b1.y,b1.z
for rn2 in range(rn1+1,len(p.res_list)-2):
b2 = p.ProteinList[0].residue_dict[p.res_list[rn2]].pseudo_sidechain
c2 = p.ProteinList[0].residue_dict[p.res_list[rn2]].central_atom
d2 = c1.dist(c2)
for rn3 in range(rn2+1,len(p.res_list)-1):
b3 = p.ProteinList[0].residue_dict[p.res_list[rn3]].pseudo_sidechain
c3 = p.ProteinList[0].residue_dict[p.res_list[rn3]].central_atom
d3 = c1.dist(c3)
for rn4 in range(rn3+1,len(p.res_list)):
b4 = p.ProteinList[0].residue_dict[p.res_list[rn4]].pseudo_sidechain
c4 = p.ProteinList[0].residue_dict[p.res_list[rn4]].central_atom
d4 = c1.dist(c4)
dist_list = [d2, d3, d4]
for d in dist_list:
if d<=shell_start or d>=shell_end:
break
else:
atom_list = [c2,c3,c4]
beta_list = [b2,b3,b4]
atom_num_list = [c2.atom_number, c3.atom_number, c4.atom_number]
sorted_list = [c2.atom_number, c3.atom_number, c4.atom_number]
sorted_list.sort()
f = [0,0,0]
for i in range(len(sorted_list)):
for j in range(len(dist_list)):
if atom_num_list[j] == sorted_list[i]:
f[i] = j
xs = [atom_list[f[0]].x, atom_list[f[1]].x, atom_list[f[2]].x]
ys = [atom_list[f[0]].y, atom_list[f[1]].y, atom_list[f[2]].y]
zs = [atom_list[f[0]].z, atom_list[f[1]].z, atom_list[f[2]].z]
xbs = [beta_list[f[0]].x, beta_list[f[1]].x, beta_list[f[2]].x]
ybs = [beta_list[f[0]].y, beta_list[f[1]].y, beta_list[f[2]].y]
zbs = [beta_list[f[0]].z, beta_list[f[1]].z, beta_list[f[2]].z]
new_distance_list = [math.sqrt(((x1- xs[0])**2) + ((y1- ys[0])**2) + ((z1- zs[0])**2)),
math.sqrt(((x1- xs[1])**2) + ((y1- ys[1])**2) + ((z1- zs[1])**2)),
math.sqrt(((x1- xs[2])**2) + ((y1- ys[2])**2) + ((z1- zs[2])**2)),
math.sqrt(((xs[0]-xs[1])**2) + ((ys[0]-ys[1])**2) + ((zs[0]-zs[1])**2)),
math.sqrt(((xs[0]-xs[2])**2) + ((ys[0]-ys[2])**2) + ((zs[0]-zs[2])**2)),
math.sqrt(((xs[1]-xs[2])**2) + ((ys[1]-ys[2])**2) + ((zs[1]-zs[2])**2))]
bet_distance_list = [math.sqrt(((xb1- xbs[0])**2) + ((yb1- ybs[0])**2) + ((zb1- zbs[0])**2)),
math.sqrt(((xb1- xbs[1])**2) + ((yb1- ybs[1])**2) + ((zb1- zbs[1])**2)),
math.sqrt(((xb1- xbs[2])**2) + ((yb1- ybs[2])**2) + ((zb1- zbs[2])**2)),
math.sqrt(((xbs[0]-xbs[1])**2) + ((ybs[0]-ybs[1])**2) + ((zbs[0]-zbs[1])**2)),
math.sqrt(((xbs[0]-xbs[2])**2) + ((ybs[0]-ybs[2])**2) + ((zbs[0]-zbs[2])**2)),
math.sqrt(((xbs[1]-xbs[2])**2) + ((ybs[1]-ybs[2])**2) + ((zbs[1]-zbs[2])**2))]
hires_distances = [new_distance_list[0], new_distance_list[1], new_distance_list[2], new_distance_list[3], new_distance_list[4], new_distance_list[5]]
lowres_dl_bins = [[],[],[],[],[],[]]
lowres_dlstrings = []
for i in range(len(new_distance_list)):
lowres_dl_bins[i].append(math.floor(dsf*new_distance_list[i]))
if do_replicate:
if (new_distance_list[i]*dsf)%1.0 <= replicate_thresh: # if the distance is just over an integer change
lowres_dl_bins[i].append((math.floor(dsf*new_distance_list[i]))-1)
elif (new_distance_list[i]*dsf)%1.0 >= (1.0-replicate_thresh):
lowres_dl_bins[i].append((math.floor(dsf*new_distance_list[i]))+1)
if do_replicate:
for i0 in lowres_dl_bins[0]:
for i1 in lowres_dl_bins[1]:
for i2 in lowres_dl_bins[2]:
for i3 in lowres_dl_bins[3]:
for i4 in lowres_dl_bins[4]:
for i5 in lowres_dl_bins[5]:
lowres_dlstrings.append('%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_'%(i0,i1,i2,i3,i4,i5))
else:
lowres_dlstrings.append('%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_'%(lowres_dl_bins[0][0],
lowres_dl_bins[1][0],
lowres_dl_bins[2][0],
lowres_dl_bins[3][0],
lowres_dl_bins[4][0],
lowres_dl_bins[5][0]))
index_key = '%s %s %s %s'%(rn1,rn2,rn3,rn4)
try:
p_lo_hash[index_key]
except KeyError:
p_lo_hash[index_key] = [{'lowstr':lowres_dlstrings, 'betas':bet_distance_list, 'hilist':hires_distances}]
else:
p_lo_hash[index_key].append({'lowstr':lowres_dlstrings, 'betas':bet_distance_list, 'hilist':hires_distances})
keys = p_lo_hash.keys()
keys.sort()
good_count = 0
print '%s combinations'%(combinations)
print '%s keys'%(len(keys))
sum_alpha1 = 0.0
sum_beta1 = 0.0
sum_alpha2 = 0.0
sum_beta2 = 0.0
cnt_alpha = 0.0
values = []
distance_count = 0
for key in keys:
if len(p_lo_hash[key]) == 3:
bail = 0
for s1 in range(len(p_lo_hash[key][0]['lowstr'])):
for s2 in range(len(p_lo_hash[key][1]['lowstr'])):
for s3 in range(len(p_lo_hash[key][2]['lowstr'])):
if p_lo_hash[key][0]['lowstr'][s1] == p_lo_hash[key][1]['lowstr'][s2]:
if p_lo_hash[key][1]['lowstr'][s2] == p_lo_hash[key][2]['lowstr'][s3]:
dist1, dist2, dist3, dist4 = 0.0, 0.0, 0.0, 0.0
# accumulate the squared distance
for d_ind in range(len(p_lo_hash[key][0]['hilist'])):
d1 = (p_lo_hash[key][0]['hilist'][d_ind] - p_lo_hash[key][1]['hilist'][d_ind])**2
d2 = (p_lo_hash[key][0]['hilist'][d_ind] - p_lo_hash[key][2]['hilist'][d_ind])**2
d3 = (p_lo_hash[key][0]['betas'][d_ind] - p_lo_hash[key][1]['betas'][d_ind])**2
d4 = (p_lo_hash[key][0]['betas'][d_ind] - p_lo_hash[key][2]['betas'][d_ind])**2
dist1 += d1
dist2 += d2
dist3 += d3
dist4 += d4
distance_count += 1
ln = len(p_lo_hash[key][0]['hilist'])
dist1,dist2,dist3,dist4 = math.sqrt(dist1/ln), math.sqrt(dist2/ln), math.sqrt(dist3/ln), math.sqrt(dist4/ln)
values.append([dist1, dist2, dist3, dist4])
sum_alpha1 += dist1
sum_alpha2 += dist2
sum_beta1 += dist3
sum_beta2 += dist4
bail = 1
print '\n*\n',
print '1 alpha %4.2f, beta %4.2f'%(dist1, dist3)
print '2 alpha %4.2f, beta %4.2f'%(dist2, dist4)
good_count += 1.0
break
if bail:
break
if bail:
break
key_tokens = string.split(key)
key_tokens[0] = int(key_tokens[0])
key_tokens[1] = int(key_tokens[1])
key_tokens[2] = int(key_tokens[2])
key_tokens[3] = int(key_tokens[3])
print '%s\n[[%3s,%3s,%3s,%3s], [%3s,%3s,%3s,%3s], [%3s,%3s,%3s,%3s]]'%(p_lo_hash[key][0]['lowstr'], x.res_list[key_tokens[0]],x.res_list[key_tokens[1]],x.res_list[key_tokens[2]],x.res_list[key_tokens[3]],y.res_list[key_tokens[0]],y.res_list[key_tokens[1]],y.res_list[key_tokens[2]],y.res_list[key_tokens[3]],z.res_list[key_tokens[0]],z.res_list[key_tokens[1]],z.res_list[key_tokens[2]],z.res_list[key_tokens[3]])
# calculate the standard deviation of the different core analogies
sq_dev = [0.0, 0.0, 0.0, 0.0]  # avoid shadowing the builtin sum()
for value in values:
    sq_dev[0] += (value[0] - (sum_alpha1/good_count))**2
    sq_dev[1] += (value[1] - (sum_alpha2/good_count))**2
    sq_dev[2] += (value[2] - (sum_beta1/good_count))**2
    sq_dev[3] += (value[3] - (sum_beta2/good_count))**2
for i in range(len(sq_dev)):
    sq_dev[i] /= (len(values)-1.0)
    sq_dev[i] = math.sqrt(sq_dev[i])
print '%s of %s good (%s)'%(good_count, len(keys), good_count/(len(keys)+0.0))
print 'averages - a1 %4.2f a2 %4.2f b1 %4.2f b2 %4.2f'%(sum_alpha1/good_count, sum_alpha2/good_count, sum_beta1/good_count, sum_beta2/good_count)
print 'std devs - %4.2f %4.2f %4.2f %4.2f'%(sq_dev[0], sq_dev[1], sq_dev[2], sq_dev[3])
|
nilq/baby-python
|
python
|
from ajenti.api import *
from ajenti.plugins.main.api import SectionPlugin
from ajenti.ui import on
from ajenti.ui.binder import Binder
from reconfigure.configs import ResolvConfig
from reconfigure.items.resolv import ItemData
@plugin
class Resolv (SectionPlugin):
def init(self):
self.title = _('Nameservers')
self.icon = 'globe'
self.category = _('System')
self.append(self.ui.inflate('resolv:main'))
self.find('name-box').labels = [_('DNS nameserver'), _('Local domain name'), _('Search list'), _('Sort list'), _('Options')]
self.find('name-box').values = ['nameserver', 'domain', 'search', 'sortlist', 'options']
self.config = ResolvConfig(path='/etc/resolv.conf')
self.binder = Binder(None, self.find('resolv-config'))
self.find('items').new_item = lambda c: ItemData()
def on_page_load(self):
self.config.load()
self.binder.setup(self.config.tree).populate()
@on('save', 'click')
def save(self):
self.binder.update()
self.config.save()
|
nilq/baby-python
|
python
|
import pandas as pd
from pandas import ExcelWriter
counties_numbers_to_names = {
3: "Santa Clara",
4: "Alameda",
5: "Contra Costa",
2: "San Mateo",
8: "Sonoma",
1: "San Francisco",
6: "Solano",
9: "Marin",
7: "Napa"
}
counties_map = pd.read_csv("data/taz_geography.csv", index_col="zone").\
county.map(counties_numbers_to_names)
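# counties_map: TAZ zone id -> county name, via the numeric codes above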
writer = ExcelWriter('county_output.xlsx')
parcels_to_counties = pd.HDFStore("data/2015_09_01_bayarea_v3.h5", "r").\
parcels.zone_id.map(counties_map)
for run in range(1308, 1312):
df = pd.read_csv("http://urbanforecast.com/runs/"\
"run%d_parcel_output.csv" % run)
df["county"] = df.parcel_id.map(parcels_to_counties)
growthinpdas = df[(df.building_type_id <= 3) & (df.pda.notnull())].\
groupby("county").net_units.sum()
growthnotinpdas = df[(df.building_type_id <= 3) & (df.pda.isnull())].\
groupby("county").net_units.sum()
pctgrowthinpdas = growthinpdas / (growthnotinpdas+growthinpdas)
    print(pctgrowthinpdas)
baseyear = pd.read_csv("output/baseyear_taz_summaries_2010.csv")
baseyear["county"] = baseyear.zone_id.map(counties_map)
outyear = pd.read_csv("http://urbanforecast.com/runs/"\
"run%d_taz_summaries_2040.csv" % run)
outyear["county"] = outyear.zone_id.map(counties_map)
hhpctgrowth = outyear.groupby("county").TOTPOP.sum() / \
baseyear.groupby("county").TOTPOP.sum() - 1
s = outyear.groupby("county").TOTPOP.sum() - \
baseyear.groupby("county").TOTPOP.sum()
hhgrowthshare = s / s.sum()
emppctgrowth = outyear.groupby("county").TOTEMP.sum() / \
baseyear.groupby("county").TOTEMP.sum() - 1
s = outyear.groupby("county").TOTEMP.sum() - \
baseyear.groupby("county").TOTEMP.sum()
empgrowthshare = s / s.sum()
growthinunits = outyear.eval("SFDU + MFDU").groupby(outyear.county).sum() - \
baseyear.eval("SFDU + MFDU").groupby(baseyear.county).sum()
growthinmultifamily = outyear.groupby(outyear.county).MFDU.sum() - \
baseyear.groupby(baseyear.county).MFDU.sum()
pct_multifamily_growth = growthinmultifamily / growthinunits
df = pd.DataFrame({
"pct_growth_in_pdas": pctgrowthinpdas,
"hh_pct_growth": hhpctgrowth,
"hh_growth_share": hhgrowthshare,
"emp_pct_growth": emppctgrowth,
"emp_growth_share": empgrowthshare,
"growth_in_units": growthinunits.astype('int'),
"pct_multifamily_growth": pct_multifamily_growth.clip(upper=1.0)
})
df.index.name = None
    df.to_excel(writer, 'run%d' % run, float_format="%.2f")

writer.save()
|
nilq/baby-python
|
python
|
#|=============================================================================
#|
#| FILE: ports.py [Python module source code]
#|
#| SYNOPSIS:
#|
#| The purpose of this module is simply to define
#| some easy-to-remember constants naming the port
#| numbers used by this application.
#|
#| SYSTEM CONTEXT:
#|
#| This file is part of the central server
#| application for the COSMICi project.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# Names exported from this package.
__all__ = [ 'COSMO_PORT', # Global constant port numbers.
'LASER_PORT',
'MESON_PORT',
'DISCO_PORT' ]
# Global declaration.
global COSMO_PORT, LASER_PORT, MESON_PORT, DISCO_PORT
#|===========================================================
#| Port numbers. [global constants]
#|
#| Define some handy global port numbers based on
#| easy-to-remember touch-tone mnemonics.
#|
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#|-----------------------------------------------------------------
#|
#| COSMO_PORT [global constant]
#|
#| This is the main port on which we listen
#| for the main (initial) connection from
#| each remote node in the local sensor net.
#| We process server commands sent to it.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
COSMO_PORT = 26766
#|-------------------------------------------------------------------
#|
#| LASER_PORT [global constant]
#|
#| We listen at this port number (and subsequent
#| ones) for the AUXIO (STDIO replacement) stream
#| from each remote node (used for diagnostics &
#| user interaction with the remote command
#| processor). This is the base port number (for
#| node #0), the node number gets added to it to
#| find the port number to be used by other nodes.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
LASER_PORT = 52737 # Use this port and subsequent ones for bridged AUXIO connections to the UWscript.
#|-------------------------------------------------------------------
#|
#| MESON_PORT [global constant]
#|
#| We listen at this port number (and subsequent
#| ones) for the bridged UART data stream from
#| each remote node. This is the base port number
#| (for node #0), the node number gets added to it
#| to find the port number for other nodes.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
MESON_PORT = 63766 # Use this port and subsequent ones for bridged UART connections to the digitizer boards.
DISCO_PORT = 34726 # Use this port for server IP address discovery.
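#|-------------------------------------------------------------------
#|
#|      Mnemonic check.                     [illustrative example]
#|
#|          A small sanity check (a sketch, not part of the
#|          original module) showing how each port number is
#|          derived from its name on a touch-tone keypad.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
_KEYPAD = {c: d for d, letters in {'2': 'ABC', '3': 'DEF', '4': 'GHI',
                                   '5': 'JKL', '6': 'MNO', '7': 'PQRS',
                                   '8': 'TUV', '9': 'WXYZ'}.items()
           for c in letters}
def _mnemonic_to_port(word):
    """Spell a word on a touch-tone keypad and read it as a port number."""
    return int(''.join(_KEYPAD[c] for c in word.upper()))
assert _mnemonic_to_port('COSMO') == COSMO_PORT
assert _mnemonic_to_port('LASER') == LASER_PORT
assert _mnemonic_to_port('MESON') == MESON_PORT
assert _mnemonic_to_port('DISCO') == DISCO_PORT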
#|^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#| END FILE: ports.py
#|----------------------------------------------------------------------
|
nilq/baby-python
|
python
|
from __future__ import annotations
import skia
from core.base import View, Rect
from views.enums import Alignment, Justify
class HBox(View):
def __init__(self):
super(HBox, self).__init__()
self._alignment = Alignment.BEGIN
self._justify = Justify.BEGIN
self._spacing = 0
self._height = None
self._width = None
self._wrap = False
self._grow = {}
self._view_width = 0
self._view_height = 0
def _lay_out_items(
self,
canvas: skia.Canvas,
x: float,
y: float,
width: float,
height: float,
draw: bool = False
) -> None:
content_x = self._spacing
max_height = 0
rows = []
row = []
view_width = self._width or width
if view_width:
view_width -= self._left_padding + self._right_padding
for item in self._children:
bounding_rect = item.get_bounding_rect()
max_height = max(max_height, bounding_rect.height)
if self._wrap and view_width and content_x + self._spacing + bounding_rect.width > view_width:
rows.append({
'row': row,
'row_items_width': content_x,
})
row = []
content_x = self._spacing
row.append({
'width': bounding_rect.width,
'height': bounding_rect.height,
'item': item,
})
content_x += bounding_rect.width + self._spacing
if row:
rows.append({
'row': row,
'row_items_width': content_x,
})
content_x = self._spacing
content_y = self._spacing
for row_info in rows:
row = row_info['row']
leftover_width = view_width - row_info['row_items_width']
for idx, item_info in enumerate(row):
item = item_info['item']
item_width = item_info['width']
item_height = item_info['height']
if self._justify == Justify.END and idx == 0:
content_x += leftover_width
if self._justify == Justify.SPACE_AROUND:
content_x += leftover_width / (len(row) + 1)
if self._justify == Justify.SPACE_BETWEEN and idx != 0:
content_x += leftover_width / (len(row) - 1)
if draw:
if self._alignment == Alignment.BEGIN:
item.draw(canvas, x + content_x, y + content_y, width, height)
elif self._alignment == Alignment.END:
item.draw(canvas, x + content_x, y + content_y + (max_height - item_height), width, height)
elif self._alignment == Alignment.CENTER:
item.draw(canvas, x + content_x, y + content_y + (max_height - item_height) / 2, width, height)
if self._justify == Justify.SPACE_AROUND and idx == len(row) - 1:
content_x += leftover_width / (len(row) + 1)
content_x += item_width + self._spacing
self._view_width = max(self._view_width, content_x)
content_y += max_height + self._spacing
self._view_height = content_y
content_x = self._spacing
def draw(self, canvas: skia.Canvas, x: float, y: float, width: float, height: float) -> None:
x += self._x + (self._left_padding or 0) + (self._left_margin or 0)
y += self._y + (self._top_padding or 0) + (self._top_margin or 0)
self._lay_out_items(
canvas,
x,
y,
width - (self._left_padding or 0) - (self._right_padding or 0),
height - (self._top_padding or 0) - (self._bottom_padding or 0),
draw=True,
)
def get_bounding_rect(self) -> Rect:
width = self._width
height = self._height
if height is None or width is None:
self._lay_out_items(None, 0, 0, 640, 480)
height = height or self._view_height
width = width or self._view_width
return Rect(
x=0,
y=0,
width=self._left_margin + width + self._right_margin,
height=self._top_margin + height + self._bottom_margin,
)
def alignment(self, alignment) -> HBox:
self._alignment = alignment
return self
def justify(self, justify) -> HBox:
self._justify = justify
return self
def spacing(self, spacing: float) -> HBox:
self._spacing = spacing
return self
def width(self, width: float) -> HBox:
self._width = width
return self
def height(self, height: float) -> HBox:
self._height = height
return self
def wrap(self, wrap: bool = False) -> HBox:
self._wrap = wrap
return self
def grow(self, view: View, priority: int) -> HBox:
self._grow[view] = priority
return self
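# Example (a sketch): composing an HBox with the fluent API above. `Label`
# is a hypothetical View subclass; any View implementing draw() and
# get_bounding_rect() would work the same way.
#
#   row = (HBox()
#          .spacing(8)
#          .wrap(True)
#          .alignment(Alignment.CENTER)
#          .justify(Justify.SPACE_BETWEEN)
#          .width(320))
#   row.draw(canvas, 0, 0, 640, 480)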
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from gevent import monkey, event
monkey.patch_all()
import uuid
import unittest
import datetime
import requests_mock
from gevent.queue import Queue
from gevent.hub import LoopExit
from time import sleep
from mock import patch, MagicMock
from openprocurement.bot.identification.client import DocServiceClient
from openprocurement.bot.identification.databridge.upload_file_to_doc_service import UploadFileToDocService
from openprocurement.bot.identification.databridge.utils import generate_doc_id, item_key
from openprocurement.bot.identification.databridge.process_tracker import ProcessTracker
from openprocurement.bot.identification.databridge.data import Data
from openprocurement.bot.identification.tests.utils import custom_sleep, generate_answers, AlmostAlwaysFalse
from openprocurement.bot.identification.databridge.constants import file_name, DOC_TYPE
from openprocurement.bot.identification.databridge.sleep_change_value import APIRateController
class TestUploadFileWorker(unittest.TestCase):
__test__ = True
def setUp(self):
self.tender_id = uuid.uuid4().hex
self.award_id = uuid.uuid4().hex
self.qualification_id = uuid.uuid4().hex
self.document_id = generate_doc_id()
self.process_tracker = ProcessTracker(db=MagicMock())
self.process_tracker.set_item(self.tender_id, self.award_id, 1)
self.upload_to_doc_service_queue = Queue(10)
self.upload_to_tender_queue = Queue(10)
self.sleep_change_value = APIRateController()
self.sna = event.Event()
self.sna.set()
self.data = Data(self.tender_id, self.award_id, '123', 'awards',
{'meta': {'id': self.document_id}, 'test_data': 'test_data'})
self.qualification_data = Data(self.tender_id, self.qualification_id, '123', 'qualifications',
{'meta': {'id': self.document_id}, 'test_data': 'test_data'})
self.doc_service_client = DocServiceClient(host='127.0.0.1', port='80', user='', password='')
self.worker = UploadFileToDocService(self.upload_to_doc_service_queue, self.upload_to_tender_queue,
self.process_tracker, self.doc_service_client, self.sna,
self.sleep_change_value)
self.url = '{url}'.format(url=self.doc_service_client.url)
@staticmethod
def stat_200():
return {'data': {'url': 'http://docs-sandbox.openprocurement.org/get/8ccbfde0c6804143b119d9168452cb6f',
'format': 'application/yaml',
'hash': 'md5:9a0364b9e99bb480dd25e1f0284c8555',
'title': file_name}}
@staticmethod
def get_tender():
return {'data': {'id': uuid.uuid4().hex,
'documentOf': 'tender',
'documentType': DOC_TYPE,
'url': 'url'}}
def tearDown(self):
del self.worker
def is_working(self, worker):
return self.upload_to_doc_service_queue.qsize() or worker.retry_upload_to_doc_service_queue.qsize()
def shutdown_when_done(self, worker):
worker.start()
while self.is_working(worker):
sleep(0.1)
worker.shutdown()
def test_init(self):
worker = UploadFileToDocService.spawn(None, None, None, None, self.sna, None)
self.assertGreater(datetime.datetime.now().isoformat(),
worker.start_time.isoformat())
self.assertEqual(worker.upload_to_doc_service_queue, None)
self.assertEqual(worker.upload_to_tender_queue, None)
self.assertEqual(worker.process_tracker, None)
self.assertEqual(worker.doc_service_client, None)
self.assertEqual(worker.services_not_available, self.sna)
self.assertEqual(worker.sleep_change_value, None)
self.assertEqual(worker.delay, 15)
self.assertEqual(worker.exit, False)
worker.shutdown()
self.assertEqual(worker.exit, True)
del worker
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_successful_upload(self, mrequest, gevent_sleep):
gevent_sleep.side_effect = custom_sleep
mrequest.post(self.url, json=self.stat_200(), status_code=200)
self.upload_to_doc_service_queue.put(self.data)
self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)])
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 1)
self.shutdown_when_done(self.worker)
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty')
self.assertEqual(self.upload_to_tender_queue.qsize(), 1, 'Queue should be have 1 element')
self.assertEqual(mrequest.call_count, 1)
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)])
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_retry_doc_service(self, mrequest, gevent_sleep):
gevent_sleep.side_effect = custom_sleep
doc_service_client = DocServiceClient(host='127.0.0.1', port='80', user='', password='')
mrequest.post(self.url, [{'text': '', 'status_code': 401} for _ in range(6)] + [
{'json': {'data': {'url': 'test url',
'format': 'application/yaml',
'hash': 'md5:9a0364b9e99bb480dd25e1f0284c8555',
'title': file_name}},
'status_code': 200}])
self.upload_to_doc_service_queue.put(self.data)
self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)])
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 1)
self.shutdown_when_done(self.worker)
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty')
self.assertEqual(self.upload_to_tender_queue.qsize(), 1, 'Queue should be have 1 element')
self.assertEqual(mrequest.call_count, 7)
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_request_failed(self, mrequest, gevent_sleep):
gevent_sleep.side_effect = custom_sleep
mrequest.post(self.url, json=self.stat_200(), status_code=200)
self.upload_to_doc_service_queue.put(self.data)
self.shutdown_when_done(self.worker)
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty')
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 1})
self.assertEqual(mrequest.call_count, 1)
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_request_failed_item_status_change(self, mrequest, gevent_sleep):
gevent_sleep.side_effect = custom_sleep
mrequest.post(self.url, json=self.stat_200(), status_code=200)
self.process_tracker.set_item(self.tender_id, self.qualification_id, 1)
self.upload_to_doc_service_queue.put(self.data)
self.upload_to_doc_service_queue.put(self.qualification_data)
self.shutdown_when_done(self.worker)
self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty')
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertEqual(self.upload_to_tender_queue.get(), self.qualification_data)
self.assertEqual(mrequest.call_count, 2)
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
self.assertEqual(self.process_tracker.processing_items,
{item_key(self.tender_id, self.award_id): 1,
item_key(self.tender_id, self.qualification_id): 1})
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_processing_items(self, mrequest, gevent_sleep):
gevent_sleep.side_effect = custom_sleep
mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)])
self.process_tracker.set_item(self.tender_id, self.award_id, 2)
self.upload_to_doc_service_queue.put(self.data)
self.upload_to_doc_service_queue.put(self.data)
self.shutdown_when_done(self.worker)
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_upload_to_doc_service_queue_loop_exit(self, mrequest, gevent_sleep):
""" Test LoopExit for upload_to_doc_service_queue """
gevent_sleep.side_effect = custom_sleep
self.process_tracker.set_item(self.tender_id, self.award_id, 2)
self.worker.upload_to_doc_service_queue = MagicMock()
self.worker.upload_to_doc_service_queue.peek.side_effect = generate_answers(
answers=[LoopExit(), self.data, self.data], default=LoopExit())
mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)])
self.worker.start()
sleep(1)
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
self.assertIsNotNone(mrequest.request_history[1].headers['X-Client-Request-ID'])
self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 2})
@requests_mock.Mocker()
@patch('gevent.sleep')
def test_retry_upload_to_doc_service_queue_loop_exit(self, mrequest, gevent_sleep):
""" Test LoopExit for retry_upload_to_doc_service_queue """
gevent_sleep.side_effect = custom_sleep
mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)])
self.process_tracker.set_item(self.tender_id, self.award_id, 2)
self.worker.retry_upload_to_doc_service_queue = MagicMock()
self.worker.retry_upload_to_doc_service_queue.peek.side_effect = generate_answers(
answers=[LoopExit(), self.data, self.data], default=LoopExit())
self.worker.start()
sleep(1)
self.worker.shutdown()
self.assertEqual(self.upload_to_tender_queue.get(), self.data)
self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 2})
self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload')
self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID'])
def test_remove_bad_data(self):
self.worker.upload_to_doc_service_queue = MagicMock(get=MagicMock())
self.worker.process_tracker = MagicMock(update_items_and_tender=MagicMock())
self.worker.remove_bad_data(self.data, Exception("test message"), False)
self.worker.upload_to_doc_service_queue.get.assert_called_once()
self.assertEqual(self.worker.retry_upload_to_doc_service_queue.get(), self.data)
def test_remove_bad_data_retry(self):
self.worker.retry_upload_to_doc_service_queue = MagicMock(get=MagicMock())
self.worker.process_tracker = MagicMock(update_items_and_tender=MagicMock())
with self.assertRaises(Exception):
self.worker.remove_bad_data(self.data, Exception("test message"), True)
self.worker.retry_upload_to_doc_service_queue.get.assert_called_once()
self.worker.process_tracker.update_items_and_tender.assert_called_with(self.data.tender_id, self.data.item_id,
self.document_id)
def test_try_upload_to_doc_service(self):
e = Exception("test error")
self.worker.update_headers_and_upload = MagicMock(side_effect=e)
self.worker.remove_bad_data = MagicMock()
self.worker.try_upload_to_doc_service(self.data, False)
self.worker.update_headers_and_upload.assert_called_once()
self.worker.remove_bad_data.assert_called_once_with(self.data, e, False)
def test_try_upload_to_doc_service_retry(self):
e = Exception("test error")
self.worker.update_headers_and_upload = MagicMock(side_effect=e)
self.worker.remove_bad_data = MagicMock()
self.worker.try_upload_to_doc_service(self.data, True)
self.worker.update_headers_and_upload.assert_called_once()
self.worker.remove_bad_data.assert_called_with(self.data, e, True)
def test_run(self):
self.worker.delay = 1
upload_worker, retry_upload_worker = MagicMock(), MagicMock()
self.worker.upload_worker = upload_worker
self.worker.retry_upload_worker = retry_upload_worker
with patch.object(self.worker, 'exit', AlmostAlwaysFalse()):
self.worker._run()
self.assertEqual(self.worker.upload_worker.call_count, 1)
self.assertEqual(self.worker.retry_upload_worker.call_count, 1)
@patch('gevent.killall')
def test_run_exception(self, killlall):
self.worker.delay = 1
self.worker._start_jobs = MagicMock(return_value={"a": 1})
self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception("test error"))
self.worker._run()
killlall.assert_called_once_with([1], timeout=5)
@patch('gevent.killall')
@patch('gevent.sleep')
    def test_run_exception_with_sleep(self, gevent_sleep, killlall):
gevent_sleep.side_effect = custom_sleep
self.worker._start_jobs = MagicMock(return_value={"a": 1})
self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception("test error"))
self.worker._run()
killlall.assert_called_once_with([1], timeout=5)
|
nilq/baby-python
|
python
|
'''
This module demonstrates control flow.
The control-flow statements covered are:
if
while
for
break
continue
'''
def guessnumber():
    '''Number guessing game.'''
    number = 23
    running = True
    while running:
        guess = int(input('Guess an integer: '))
        if guess == number:
            print('Congratulations, you guessed it!')
            running = False
        elif guess < number:
            print('No, too small')
        else:
            print('No, too big')
    else:
        print('Guessing finished!')
guessnumber()
print('Game over')
|
nilq/baby-python
|
python
|
import pytest
from ipypublish.filters_pandoc.utils import apply_filter
from ipypublish.filters_pandoc import prepare_labels
from ipypublish.filters_pandoc import format_label_elements
def test_math_span_latex():
in_json = {"blocks": [{"t": "Para", "c": [
{"t": "Span", "c": [
["a", ["labelled-Math"], [["b", "2"]]],
[{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
]}], "pandoc-api-version": [1, 17, 5, 1],
"meta": {
"$$references": {"t": "MetaMap", "c": {
"a": {"t": "MetaMap", "c": {
"type": {"t": "MetaString", "c": "Math"},
"number": {"t": "MetaString", "c": "1"}}}}}}}
out_string = apply_filter(
in_json, format_label_elements.main, "latex", in_format="json")
assert out_string.strip() == "\n".join([
r"\begin{equation}a=1\label{a}\end{equation}"
])
def test_math_span_rst():
in_json = {"blocks": [{"t": "Para", "c": [
{"t": "Span", "c": [
["a", ["labelled-Math"], [["b", "2"]]],
[{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
]}], "pandoc-api-version": [1, 17, 5, 1],
"meta": {
"$$references": {"t": "MetaMap", "c": {
"a": {"t": "MetaMap", "c": {
"type": {"t": "MetaString", "c": "Math"},
"number": {"t": "MetaString", "c": "1"}}}}}}}
out_string = apply_filter(
in_json, format_label_elements.main, "rst", in_format="json")
assert out_string.strip() == "\n".join([
".. math::",
" :nowrap:",
" :label: a",
"",
r" \begin{equation}a=1\end{equation}"
])
@pytest.mark.skip(
reason="there's an issue with pandoc outputting unicode in '/em> = 1'")
def test_math_span_html():
in_json = {"blocks": [{"t": "Para", "c": [
{"t": "Span", "c": [
["a", ["labelled-Math"], [["b", "2"]]],
[{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
]}], "pandoc-api-version": [1, 17, 5, 1],
"meta": {
"$$references": {"t": "MetaMap", "c": {
"a": {"t": "MetaMap", "c": {
"type": {"t": "MetaString", "c": "Math"},
"number": {"t": "MetaString", "c": "1"}}}}}}}
out_string = apply_filter(
in_json, format_label_elements.main, "html", in_format="json")
assert out_string.strip() == "\n".join([
'<p><a id="a" class="anchor-link" name="#a">'
'<br />'
'<span class="math display"><em>a</em> = 1</span>'
'<br />'
'</a></p>'
])
def test_math_md_to_rst():
in_str = [
"$$a = b$$ {#eq:id1}",
"$$c &= d \\\\ other &= e$$ {#a env=align .unnumbered}"
]
out_string = apply_filter(
in_str, [prepare_labels.main, format_label_elements.main],
in_format="markdown", out_format="rst")
assert out_string.strip() == "\n".join([
".. math::",
" :nowrap:",
" :label: eq:id1",
"",
r" \begin{equation}a = b\end{equation}",
"",
"",
"",
".. math::",
" :nowrap:",
" :label: a",
"",
r" \begin{align*}c &= d \\ other &= e\end{align*}"
])
def test_image_html():
"""
"""
# "{#label1 .class-name a=5}"
in_json = (
{"blocks": [
{"t": "Para", "c": [
{"t": "Image", "c": [
["label1",
["class-name"],
[["a", "5"]]],
[{"t": "Str", "c": "a"},
{"t": "Space"}, {"t": "Str", "c": "title"}],
["path/to/image.png", "fig:"]]}]}],
"pandoc-api-version": [1, 17, 5, 1], "meta": {}}
)
out_string = apply_filter(
in_json, format_label_elements.main, "html", in_format="json")
assert out_string.strip() == "\n".join([
'<p><a id="label1" class="anchor-link" name="#label1">'
'<img src="path/to/image.png" title="fig:" alt="a title" id="label1" '
'class="class-name" data-a="5" />'
'</a></p>'
])
def test_image_rst():
"""
"""
# "{#label1 .class-name a=5}"
in_json = (
{"blocks": [
{"t": "Para", "c": [
{"t": "Image", "c": [
["label1",
["class-name"],
[["a", "5"]]],
[{"t": "Str", "c": "a"},
{"t": "Space"}, {"t": "Str", "c": "title"}],
["path/to/image.png", "fig:"]]}]}],
"pandoc-api-version": [1, 17, 5, 1], "meta": {}}
)
out_string = apply_filter(
in_json, format_label_elements.main, "rst", in_format="json")
assert out_string.strip() == "\n".join([
".. figure:: path/to/image.png",
" :alt: a title",
" :figclass: class-name",
" :name: label1",
"",
" a title"
])
def test_image_latex():
"""
"""
# "{#label1 .class-name a=5}"
in_json = (
{"blocks": [
{"t": "Para", "c": [
{"t": "Image", "c": [
["label1",
["class-name"],
[["a", "5"]]],
[{"t": "Str", "c": "a"},
{"t": "Space"}, {"t": "Str", "c": "title"}],
["path/to/image.png", "fig:"]]}]}],
"pandoc-api-version": [1, 17, 5, 1], "meta": {}}
)
out_string = apply_filter(
in_json, format_label_elements.main, "latex", in_format="json")
assert out_string.strip() == "\n".join([
r"\begin{figure}[]",
r"\hypertarget{label1}{%",
r"\begin{center}",
r"\adjustimage{max size={0.9\linewidth}{0.9\paperheight},}"
r"{path/to/image.png}",
r"\end{center}",
r"\caption{a title}\label{label1}",
"}",
r"\end{figure}"
])
def test_table_html():
"""
Some text
a b
- -
1 2
4 5
Table: Caption. {#tbl:id}
"""
in_json = (
{
"pandoc-api-version": [1, 17, 5, 1],
"meta": {
"$$references": {"t": "MetaMap", "c": {
"tbl:id": {"t": "MetaMap", "c": {
"type": {"t": "MetaString", "c": "Table"},
"number": {"t": "MetaString", "c": "1"}}}}}},
"blocks": [{"t": "Para", "c": [
{"t": "Str", "c": "Some"},
{"t": "Space"},
{"t": "Str", "c": "text"}]},
{"t": "Div", "c": [
["tbl:id", ["labelled-Table"], []],
[{"t": "Table", "c": [
[{"t": "Str", "c": "Caption."},
{"t": "Space"}],
[{"t": "AlignDefault"},
{"t": "AlignDefault"}],
[0, 0],
[[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]],
[[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]],
[[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}]
]]]}]]}]}
)
out_string = apply_filter(
in_json, format_label_elements.main, "html", in_format="json")
assert out_string.strip() == "\n".join([
'<p>Some text</p>',
'<a id="tbl:id" class="anchor-link" name="#tbl:id">',
'<table>',
'<caption>Caption. </caption>',
'<thead>',
'<tr class="header">',
'<th>a</th>',
'<th>b</th>',
'</tr>',
'</thead>',
'<tbody>',
'<tr class="odd">',
'<td>1</td>',
'<td>2</td>',
'</tr>',
'<tr class="even">',
'<td>4</td>',
'<td>5</td>',
'</tr>',
'</tbody>',
'</table>',
'</a>'])
def test_table_rst():
"""
Some text
a b
- -
1 2
4 5
Table: Caption. {#tbl:id}
"""
in_json = (
{
"pandoc-api-version": [1, 17, 5, 1],
"meta": {
"$$references": {"t": "MetaMap", "c": {
"tbl:id": {"t": "MetaMap", "c": {
"type": {"t": "MetaString", "c": "Table"},
"number": {"t": "MetaString", "c": "1"}}}}}},
"blocks": [{"t": "Para", "c": [
{"t": "Str", "c": "Some"},
{"t": "Space"},
{"t": "Str", "c": "text"}]},
{"t": "Div", "c": [
["tbl:id", ["labelled-Table"], []],
[{"t": "Table", "c": [
[{"t": "Str", "c": "Caption."},
{"t": "Space"}],
[{"t": "AlignDefault"},
{"t": "AlignDefault"}],
[0, 0],
[[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]],
[[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]],
[[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}],
[{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}]
]]]}]]}]}
)
out_string = apply_filter(
in_json, format_label_elements.main, "rst", in_format="json")
assert out_string.strip().splitlines()[0:3] == [
'Some text', '', '.. _`tbl:id`:'
]
|
nilq/baby-python
|
python
|
from lxml import etree
import glob
import os
class Plugin:
"""Class that defines a plugin with :
- his name
- his description
- his version
- his state..."""
def __init__(self, file, name, desc, version, state):
self.file = file
self.name = name
self.desc = desc
self.version = version
self.state = state
def CreatePlugin(p, xml):
"""Function that loads the plugin."""
tree = etree.parse(xml)
root = tree.getroot()
file = p
name = root[0].text
desc = root[1].text
version = root[2].text
state = str2bool(root[3].text)
plugin = Plugin(file, name, desc, version, state)
return plugin
def LoadPlugins():
"""Function that loads the plugin directory and create plugin objects."""
plugs = glob.glob("plugins/*.py")
plugins = []
    for path in plugs:
        # Strip the directory and the ".py" extension to get the module name
        # (portable across path separators, unlike string replacement).
        p = os.path.splitext(os.path.basename(path))[0]
        if p in ("__init__", "PluginLoader"):
            continue
        xml = "plugins/{p}.xml".format(p=p)
        try:
            plg = CreatePlugin(p, xml)
            plugins.append(plg)
        except Exception:
            # Skip plugins with a missing or malformed descriptor.
            pass
return plugins
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1", "oui", "vrai", "activé", "active", "on", "enable", "enabled")
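# Example descriptor (a sketch) as read by CreatePlugin above -- only the
# element *order* matters (name, description, version, state); the tag names
# themselves are illustrative. For "plugins/MyPlugin.py" this would live in
# "plugins/MyPlugin.xml":
#
#   <plugin>
#       <name>MyPlugin</name>
#       <description>Adds a useful command</description>
#       <version>1.0</version>
#       <state>enabled</state>
#   </plugin>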
|
nilq/baby-python
|
python
|
# type: ignore
import os
import signal
import sys
import time
def signal_handler(sig, frame):
print("You pressed Ctrl+C!")
time.sleep(1)
with open(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"tests",
"signal_gracefully_terminated",
),
"w",
) as f:
f.write("blah")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
print("Press Ctrl+C")
signal.pause()
|
nilq/baby-python
|
python
|
import LagInput
import os
def readInput(filename):
# INPUT: string filename
# OUTPUT: LagInput lagin
# This function reads from the input file and output the LagInput type lagin containing all the input values
os.chdir("../input")
fid = open(filename,"r")
    for line in fid.readlines():
        # Line parsed into tokens
        lp = line.split()
        if not lp:
            # Skip blank lines
            continue
        if len(lp) < 2 or lp[1] == 'None':
            print("Invalid input. Using default values")
            IniPos = 0.0
            IniVel = 0.0
            IniTemp = 0.0
            DampCoef = 0.0
            dt = 1
            ttot = 10
            break
        if lp[0] == "initial_position":
            IniPos = float(lp[1])
        elif lp[0] == "initial_velocity":
            IniVel = float(lp[1])
        elif lp[0] == "temperature":
            IniTemp = float(lp[1])
        elif lp[0] == "damping_coefficient":
            DampCoef = float(lp[1])
        elif lp[0] == "time_step":
            dt = float(lp[1])
        elif lp[0] == "total_time":
            ttot = float(lp[1])
    fid.close()
    # ttot is actually the total number of time steps, i.e. total_time/dt
    ttot = int(ttot/dt)
os.chdir("../src")
# print(IniPos)
# print(IniVel)
# print(IniTemp)
# print(DampCoef)
# print(dt)
# print(ttot)
laginput = LagInput.get_LagInput(IniPos, IniVel, IniTemp, DampCoef, dt, ttot)
return laginput
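# Example input file (a sketch) accepted by readInput -- one "key value"
# pair per line, using exactly the keys parsed above:
#
#   initial_position 0.0
#   initial_velocity 1.0
#   temperature 300.0
#   damping_coefficient 0.1
#   time_step 0.01
#   total_time 10.0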
def writeInput(args,filename):
if not os.path.isdir("../input"):
os.mkdir("../input")
os.chdir("../input")
fidin = open(filename,"w")
for arg in vars(args):
line = arg, getattr(args, arg)
fidin.write("%s %s\n"%(arg, getattr(args,arg)))
os.chdir("../src")
|
nilq/baby-python
|
python
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
import MatrixOperations as MO
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
class Direct(BaseMyPC):
def __init__(self, W, A):
        # print 333
self.W = W
self.A = A
IS = MO.IndexSet(W)
self.u_is = IS[0]
self.p_is = IS[1]
def create(self, pc):
self.diag = None
kspL = PETSc.KSP()
kspL.create(comm=PETSc.COMM_WORLD)
pc = kspL.getPC()
kspL.setType('preonly')
pc.setType('lu')
OptDB = PETSc.Options()
# OptDB['pc_factor_shift_amount'] = 1
OptDB['pc_factor_mat_ordering_type'] = 'rcm'
OptDB['pc_factor_mat_solver_package'] = 'mumps'
kspL.setFromOptions()
self.kspL = kspL
kspM = PETSc.KSP()
kspM.create(comm=PETSc.COMM_WORLD)
pc = kspM.getPC()
kspM.setType('preonly')
pc.setType('lu')
kspM.setFromOptions()
self.kspM = kspM
# print kspM.view()
def setUp(self, pc):
A, P = pc.getOperators()
L = A.getSubMatrix(self.u_is,self.u_is)
self.kspM.setOperators(self.A,self.A)
self.kspL.setOperators(L,L)
def apply(self, pc, x, y):
# print 1000
# self.kspL.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
# print 111
self.kspM.solve(x2, y2)
self.kspL.solve(x1, y1)
y.array = (np.concatenate([y1.array, y2.array]))
class Approx(object):
def __init__(self, W, A):
self.W = W
self.A = A
IS = MO.IndexSet(W)
self.u_is = IS[0]
self.p_is = IS[1]
def create(self, pc):
kspL = PETSc.KSP()
kspL.create(comm=PETSc.COMM_WORLD)
pcL = kspL.getPC()
kspL.setType('preonly')
pcL.setType('hypre')
# kspL.max_it = 1
kspL.setFromOptions()
self.kspL = kspL
kspM = PETSc.KSP()
kspM.create(comm=PETSc.COMM_WORLD)
pcM = kspM.getPC()
kspM.setType('preonly')
pcM.setType('hypre')
kspM.setFromOptions()
self.kspM = kspM
def setUp(self, pc):
A, P = pc.getOperators()
L = A.getSubMatrix(self.u_is,self.u_is)
M = P.getSubMatrix(self.p_is,self.p_is)
self.kspM.setOperators(M,M)
self.kspL.setOperators(L,L)
def apply(self, pc, x, y):
# self.kspL.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
self.kspL.solve(x1, y1)
self.kspM.solve(x2, y2)
y.array = (np.concatenate([y1.array, y2.array]))
class ApproxSplit(object):
def __init__(self, W, A, M):
self.W = W
self.A = A
self.M = M
IS = MO.IndexSet(W)
self.u_is = IS[0]
self.p_is = IS[1]
def create(self, pc):
self.diag = None
kspL = PETSc.KSP()
kspL.create(comm=PETSc.COMM_WORLD)
pcL = kspL.getPC()
kspL.setType('preonly')
pcL.setType('ml')
# kspL.max_it = 1
kspL.setFromOptions()
self.kspL = kspL
kspM = PETSc.KSP()
kspM.create(comm=PETSc.COMM_WORLD)
pcM = kspM.getPC()
kspM.setType('cg')
pcM.setType('jacobi')
kspM.setFromOptions()
self.kspM = kspM
def setUp(self, pc):
self.kspM.setOperators(self.M,self.M)
self.kspL.setOperators(self.A,self.A)
def apply(self, pc, x, y):
# self.kspL.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
self.kspL.solve(x1, y1)
self.kspM.solve(x2, y2)
y.array = (np.concatenate([y1.array, y2.array]))
class MHDApprox(object):
def __init__(self, W, kspA, kspQ):
self.W = W
self.kspA = kspA
self.kspQ = kspQ
self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
def apply(self, pc, x, y):
# self.kspL.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
self.kspQ.solve(x2, y2)
self.kspA.solve(x1, y1)
y.array = (np.concatenate([y1.array, y2.array]))
def ApproxFunc(W, A, x, y):
IS = MO.IndexSet(W)
u_is = IS[0]
p_is = IS[1]
diag = None
kspL = PETSc.KSP()
kspL.create(comm=PETSc.COMM_WORLD)
pcL = kspL.getPC()
kspL.setType('preonly')
pcL.setType('gamg')
# kspL.max_it = 1
kspL.setFromOptions()
kspM = PETSc.KSP()
kspM.create(comm=PETSc.COMM_WORLD)
pcM = kspM.getPC()
kspM.setType('cg')
pcM.setType('jacobi')
kspM.setFromOptions()
L = A.getSubMatrix(u_is,u_is)
M = A.getSubMatrix(p_is,p_is)
kspM.setOperators(M,M)
kspL.setOperators(L,L)
# kspL.setOperators(self.B)
x1 = x.getSubVector(u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(p_is)
y2 = x2.duplicate()
kspL.solve(x1, y1)
kspM.solve(x2, y2)
y.array = (np.concatenate([y1.array, y2.array]))
def ApproxSplitFunc(W, A, M,x,y):
W = W
A = A
M = M
IS = MO.IndexSet(W)
u_is = IS[0]
p_is = IS[1]
diag = None
kspL = PETSc.KSP()
kspL.create(comm=PETSc.COMM_WORLD)
pcL = kspL.getPC()
kspL.setType('preonly')
pcL.setType('gamg')
# kspL.max_it = 1
kspL.setFromOptions()
kspM = PETSc.KSP()
kspM.create(comm=PETSc.COMM_WORLD)
pcM = kspM.getPC()
kspM.setType('cg')
pcM.setType('jacobi')
kspM.setFromOptions()
kspM.setOperators(M,M)
kspL.setOperators(A,A)
x1 = x.getSubVector(u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(p_is)
y2 = x2.duplicate()
kspL.solve(x1, y1)
kspM.solve(x2, y2)
y.array = (np.concatenate([y1.array, y2.array]))
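# A minimal sketch of how the classes above are typically wired up: once an
# instance is registered as a "python"-type PC, PETSc calls back into its
# setUp()/apply() methods. `context` is e.g. Approx(W, A) or
# ApproxSplit(W, A, M) built by the caller.
def attach_python_pc(ksp, context):
    pc = ksp.getPC()
    pc.setType(PETSc.PC.Type.PYTHON)
    pc.setPythonContext(context)
    ksp.setFromOptions()
    return ksp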
|
nilq/baby-python
|
python
|
"""$ fio distrib"""
import json
import logging
import click
import cligj
from fiona.fio import helpers, with_context_env
@click.command()
@cligj.use_rs_opt
@click.pass_context
@with_context_env
def distrib(ctx, use_rs):
"""Distribute features from a collection.
Print the features of GeoJSON objects read from stdin.
"""
logger = logging.getLogger(__name__)
stdin = click.get_text_stream('stdin')
try:
source = helpers.obj_gen(stdin)
for i, obj in enumerate(source):
obj_id = obj.get('id', 'collection:' + str(i))
features = obj.get('features') or [obj]
for j, feat in enumerate(features):
if obj.get('type') == 'FeatureCollection':
feat['parent'] = obj_id
                feat_id = feat.get('id', 'feature:' + str(j))
feat['id'] = feat_id
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(feat))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
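# Example (a sketch): pipe a GeoJSON feature stream through the command,
# e.g. `fio dump collection.shp | fio distrib`, which emits one feature per
# line with 'parent' and 'id' filled in as above.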
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import base64
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from plant_disease_classification_api.models import ClassficationRequestItem
from plant_disease_classification_api.ml.plant_disease_classifier import (
PlantDiseaseClassifier,
)
app = FastAPI()
@app.get("/")
def read_root():
html_content = """
<html>
<head>
<title>Plant Disease Classification API</title>
</head>
<body>
<h1>Welcome to Plant Disease Classification API</h1>
<h2><a href="/docs">Documentation</a></h2>
</body>
</html>
"""
return HTMLResponse(content=html_content, status_code=200)
@app.post("/classify")
async def classify(requestItem: ClassficationRequestItem):
if len(requestItem.modelName) == 0:
return {"error": "Please provide name of model you want to use."}
if len(requestItem.data) == 0:
return {"error": "Please provide Base64 encoded image data."}
dir_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(dir_path, "models", requestItem.modelName)
if os.path.exists(path):
plant_disease_classifier = PlantDiseaseClassifier(model_path=path)
image_data = base64.b64decode(requestItem.data)
result = plant_disease_classifier.classify(image_data=image_data)
return {"result": result}
else:
return {"error": "ML Model not found!"}
|
nilq/baby-python
|
python
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from collections import Counter
import operator
import re
import os
import gc
import gensim
from gensim import corpora
from nltk.corpus import stopwords
import string
from copy import deepcopy
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from nltk import word_tokenize, ngrams
from sklearn.cross_validation import KFold
from sklearn import ensemble
from sklearn.metrics import log_loss
import seaborn as sns
import matplotlib.pyplot as plt
from subprocess import check_output
get_ipython().magic('matplotlib inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
pal = sns.color_palette()
color = sns.color_palette()
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_colwidth', -1)
pd.options.mode.chained_assignment = None # default='warn'
words = re.compile(r"\w+",re.I)
stopword = stopwords.words('english')
#prelim data exploration
train = pd.read_csv("train.csv").fillna("")
test = pd.read_csv("test.csv").fillna("")
train.groupby("is_duplicate")['id'].count().plot.bar()
dfs = train[0:2500]
dfs.groupby("is_duplicate")['id'].count().plot.bar()
dfq1, dfq2 = dfs[['qid1', 'question1']], dfs[['qid2', 'question2']]
dfq1.columns = ['qid1', 'question']
dfq2.columns = ['qid2', 'question']
dfqa = pd.concat((dfq1, dfq2), axis=0).fillna("")
nrows_for_q1 = dfqa.shape[0]/2
all_ques_df = pd.DataFrame(pd.concat([train['question1'], train['question2']]))
all_ques_df.columns = ["questions"]
all_ques_df["num_of_words"] = all_ques_df["questions"].apply(lambda x : len(str(x).split()))
cnt_srs = all_ques_df['num_of_words'].value_counts()
plt.figure(figsize=(12,6))
sns.barplot(cnt_srs.index, cnt_srs.values, alpha=0.8, color=color[0])
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('Number of words in the question', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
all_ques_df["num_of_chars"] = all_ques_df["questions"].apply(lambda x : len(str(x)))
cnt_srs = all_ques_df['num_of_chars'].value_counts()
plt.figure(figsize=(50,8))
sns.barplot(cnt_srs.index, cnt_srs.values, alpha=0.8, color=color[3])
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('Number of characters in the question', fontsize=12)
plt.xticks(rotation='vertical')
plt.show()
del all_ques_df
train_qs = pd.Series(train['question1'].tolist() + train['question2'].tolist()).astype(str)
test_qs = pd.Series(test['question1'].tolist() + test['question2'].tolist()).astype(str)
dist_train = train_qs.apply(len)
dist_test = test_qs.apply(len)
plt.figure(figsize=(15, 10))
plt.hist(dist_train, bins=200, range=[0, 200], color=pal[2], normed=True, label='train')
plt.hist(dist_test, bins=200, range=[0, 200], color=pal[1], normed=True, alpha=0.5, label='test')
plt.title('Normalised histogram of character count in questions', fontsize=15)
plt.legend()
plt.xlabel('Number of characters', fontsize=15)
plt.ylabel('Probability', fontsize=15)
print('mean-train {:.2f} std-train {:.2f} mean-test {:.2f} std-test {:.2f} max-train {:.2f} max-test {:.2f}'.format(dist_train.mean(),
dist_train.std(), dist_test.mean(), dist_test.std(), dist_train.max(), dist_test.max()))
##########################################
#transform questions with Tf-Tfidf
mq1 = TfidfVectorizer().fit_transform(dfqa['question'].values)
diff_encodings = mq1[::2] - mq1[1::2]
import nltk
STOP_WORDS = nltk.corpus.stopwords.words()
def clean_sentence(val):
regex = re.compile('([^\s\w]|_&*)+')
sentence = regex.sub('', val).lower()
sentence = sentence.split(" ")
for word in list(sentence):
if word in STOP_WORDS:
sentence.remove(word)
sentence = " ".join(sentence)
return sentence
def clean_trainframe(df):
df = df.dropna(how="any")
for col in ['question1', 'question2']:
df[col] = df[col].apply(clean_sentence)
return df
def build_corpus(df):
corpus = []
for col in ['question1', 'question2']:
for sentence in df[col].iteritems():
word_list = sentence[1].split(" ")
corpus.append(word_list)
return corpus
df = clean_trainframe(train)
corpus = build_corpus(df)
from gensim.models import word2vec
model = word2vec.Word2Vec(corpus, size=100, window=20, min_count=200, workers=4)
def tsne_plot(model):
labels = []
tokens = []
for word in model.wv.vocab:
tokens.append(model[word])
labels.append(word)
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = []
y = []
for value in new_values:
x.append(value[0])
y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
plt.scatter(x[i],y[i])
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.show()
tsne_plot(model)
from collections import Counter
import matplotlib.pyplot as plt
import operator
def eda(df):
print ("Duplicate Count = %s , Non Duplicate Count = %s"
%(df.is_duplicate.value_counts()[1],df.is_duplicate.value_counts()[0]))
question_ids_combined = df.qid1.tolist() + df.qid2.tolist()
print ("Unique Questions = %s" %(len(np.unique(question_ids_combined))))
question_ids_counter = Counter(question_ids_combined)
sorted_question_ids_counter = sorted(question_ids_counter.items(), key=operator.itemgetter(1))
question_appearing_more_than_once = [i for i in question_ids_counter.values() if i > 1]
print ("Count of Quesitons appearing more than once = %s" %(len(question_appearing_more_than_once)))
eda(train)
def eda(df):
question_ids_combined = df.qid1.tolist() + df.qid2.tolist()
print ("Unique Questions = %s" %(len(np.unique(question_ids_combined))))
question_ids_counter = Counter(question_ids_combined)
sorted_question_ids_counter = sorted(question_ids_counter.items(), key=operator.itemgetter(1))
question_appearing_more_than_once = [i for i in question_ids_counter.values() if i > 1]
print ("Count of Quesitons appearing more than once = %s" %(len(question_appearing_more_than_once)))
eda(test)
import re
import gensim
from gensim import corpora
from nltk.corpus import stopwords
words = re.compile(r"\w+",re.I)
stopword = stopwords.words('english')
def tokenize_questions(df):
question_1_tokenized = []
question_2_tokenized = []
for q in df.question1.tolist():
question_1_tokenized.append([i.lower() for i in words.findall(q) if i not in stopword])
for q in df.question2.tolist():
question_2_tokenized.append([i.lower() for i in words.findall(q) if i not in stopword])
df["Question_1_tok"] = question_1_tokenized
df["Question_2_tok"] = question_2_tokenized
return df
def train_dictionary(df):
questions_tokenized = df.Question_1_tok.tolist() + df.Question_2_tok.tolist()
dictionary = corpora.Dictionary(questions_tokenized)
dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=10000000)
dictionary.compactify()
return dictionary
df_train = tokenize_questions(train)
dictionary = train_dictionary(df_train)
print ("No of words in the dictionary = %s" %len(dictionary.token2id))
def get_vectors(df, dictionary):
question1_vec = [dictionary.doc2bow(text) for text in df.Question_1_tok.tolist()]
question2_vec = [dictionary.doc2bow(text) for text in df.Question_2_tok.tolist()]
question1_csc = gensim.matutils.corpus2csc(question1_vec, num_terms=len(dictionary.token2id))
question2_csc = gensim.matutils.corpus2csc(question2_vec, num_terms=len(dictionary.token2id))
return question1_csc.transpose(),question2_csc.transpose()
q1_csc, q2_csc = get_vectors(df_train, dictionary)
df_test = tokenize_questions(test)
dictionary = train_dictionary(df_test)
q1_csc, q2_csc = get_vectors(df_test, dictionary)
from sklearn.metrics.pairwise import cosine_similarity as cs
def get_cosine_similarity(q1_csc, q2_csc):
cosine_sim = []
for i,j in zip(q1_csc, q2_csc):
sim = cs(i,j)
cosine_sim.append(sim[0][0])
return cosine_sim
cosine_sim = get_cosine_similarity(q1_csc, q2_csc)
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.linear_model import LogisticRegression as LR
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.pipeline import Pipeline
np.random.seed(10)
def train_rfc(X,y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
svm_models = [('svm', SVC(verbose=1, shrinking=False))]
svm_pipeline = Pipeline(svm_models)
svm_params = {'svm__kernel' : ['rbf'],
'svm__C' : [0.01,0.1,1],
'svm__gamma' :[0.1,0.2,0.4],
'svm__tol' :[0.001,0.01,0.1],
'svm__class_weight' : [{1:0.8,0:0.2}]}
rfc_models = [('rfc', RFC())]
rfc_pipeline = Pipeline(rfc_models)
rfc_params = {'rfc__n_estimators' : [40],
'rfc__max_depth' : [40],
'rfc__min_samples_leaf' : [50]}
lr_models = [('lr', LR(verbose=1))]
lr_pipeline = Pipeline(lr_models)
lr_params = {'lr__C': [0.1, 0.01],
'lr__tol': [0.001,0.01],
'lr__max_iter': [200,400],
'lr__class_weight' : [{1:0.8,0:0.2}]}
gbc_models = [('gbc', GBC(verbose=1))]
gbc_pipeline = Pipeline(gbc_models)
gbc_params = {'gbc__n_estimators' : [100,200, 400, 800],
'gbc__max_depth' : [40, 80, 160, 320],
'gbc__learning_rate' : [0.01,0.1]}
    # Full search space, left here for reference; the actual search below is
    # restricted to the random forest pipeline to keep the grid search cheap.
    # grid = zip([svm_pipeline, rfc_pipeline, lr_pipeline, gbc_pipeline],
    #            [svm_params, rfc_params, lr_params, gbc_params])
    grid = zip([rfc_pipeline],
               [rfc_params])
best_clf = None
for model_pipeline, param in grid:
temp = GridSearchCV(model_pipeline, param_grid=param, cv=4, scoring='f1')
temp.fit(X_train, y_train)
if best_clf is None:
best_clf = temp
else:
if temp.best_score_ > best_clf.best_score_:
best_clf = temp
model_details = {}
model_details["CV Accuracy"] = best_clf.best_score_
model_details["Model Parameters"] = best_clf.best_params_
model_details["Test Data Score"] = best_clf.score(X_test, y_test)
model_details["F1 score"] = f1_score(y_test, best_clf.predict(X_test))
model_details["Confusion Matrix"] = str(confusion_matrix(y_test, best_clf.predict(X_test)))
return best_clf, model_details
X = np.array(cosine_sim).reshape(-1,1)
y = df_train.is_duplicate
clf, model_details = train_rfc(X,y)
print (model_details)
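# A quick sketch: score one new question pair with the fitted model, reusing
# the bag-of-words dictionary and the same single cosine-similarity feature
# (the example sentences are illustrative).
q1_bow = dictionary.doc2bow("how do i learn python".split())
q2_bow = dictionary.doc2bow("what is the best way to learn python".split())
v1 = gensim.matutils.corpus2csc([q1_bow], num_terms=len(dictionary.token2id)).transpose()
v2 = gensim.matutils.corpus2csc([q2_bow], num_terms=len(dictionary.token2id)).transpose()
print (clf.predict(np.array([[cs(v1, v2)[0][0]]])))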
|
nilq/baby-python
|
python
|
from rest_framework.views import APIView
from rest_framework.response import Response
from . import signals
EVENTS = {
'Push Hook': signals.push_hook,
'Tag Push Hook': signals.tag_push_hook,
'Issue Hook': signals.issue_hook,
'Note Hook': signals.note_hook,
'Merge Request Hook': signals.merge_request_hook,
'Wiki Page Hook': signals.wiki_page_hook,
'Pipeline Hook': signals.pipeline_hook,
'Build Hook': signals.build_hook,
}
def get_event_header(request):
return request.META.get('HTTP_X_GITLAB_EVENT', b'')
class HookEvent(APIView):
queryset = None
permission_classes = ()
def send_signals(self, request, _format=None):
event = get_event_header(request)
if event not in EVENTS:
return Response({}, 404)
EVENTS[event].send(sender=None, payload=request.data)
return Response({}, 200)
def get(self, request, _format=None):
return self.send_signals(request, _format)
def post(self, request, _format=None):
return self.send_signals(request, _format)
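# Example (a sketch): a receiver for one of the signals dispatched above,
# assuming `signals.push_hook` is a django Signal as used in EVENTS; the
# 'commits' key follows GitLab's push-event payload.
def handle_push(sender, payload, **kwargs):
    for commit in payload.get('commits', []):
        print(commit['id'])
# Enable it by connecting the receiver somewhere at startup:
# signals.push_hook.connect(handle_push)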
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
muonSeedsAnalyzer = cms.EDAnalyzer("MuonSeedsAnalyzer",
MuonServiceProxy,
SeedCollection = cms.InputTag("ancientMuonSeed"),
seedPxyzMin = cms.double(-50.0),
pxyzErrMin = cms.double(-100.0),
phiErrMax = cms.double(3.2),
pxyzErrMax = cms.double(100.0),
RecHitBin = cms.int32(25),
etaErrMin = cms.double(0.0),
seedPtMin = cms.double(0.0),
seedPxyzBin = cms.int32(100),
ThetaBin = cms.int32(100),
RecHitMin = cms.double(0.0),
EtaMin = cms.double(-3.0),
pErrBin = cms.int32(200),
phiErrBin = cms.int32(160),
EtaMax = cms.double(3.0),
etaErrBin = cms.int32(200),
seedPxyzMax = cms.double(50.0),
ThetaMin = cms.double(0.0),
PhiMin = cms.double(-3.2),
pxyzErrBin = cms.int32(100),
RecHitMax = cms.double(25.0),
ThetaMax = cms.double(3.2),
pErrMin = cms.double(0.0),
EtaBin = cms.int32(100),
pErrMax = cms.double(200.0),
seedPtMax = cms.double(200.0),
seedPtBin = cms.int32(1000),
phiErrMin = cms.double(0.0),
PhiBin = cms.int32(100),
debug = cms.bool(False),
etaErrMax = cms.double(0.5),
PhiMax = cms.double(3.2)
)
|
nilq/baby-python
|
python
|
import unittest
from monocliche.src.Card import Card
from monocliche.src.Deck import Deck
from monocliche.src.actions.DrawCardAction import DrawCardAction
class DrawCardActionTest(unittest.TestCase):
def test_execute(self):
cards = [Card('card1', '', None), Card('card2', '', None)]
deck = Deck(cards)
action = DrawCardAction(deck)
card = action.execute(None)
self.assertEqual('card1', card.title)
card = action.execute(None)
self.assertEqual('card2', card.title)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from bitIO import *
from Element import Element
from PQHeap import PQHeap
import os
class Huffman:
"""
Huffman compression and decompression.
Authors:
- Kian Banke Larsen (kilar20)
- Silas Pockendahl (silch20)
"""
HEADER_SIZE = 1024
def _createHuffmanTree(freqs):
"""
Creates and returns a Huffman tree,
given a map (list) from byte to frequency.
"""
q = PQHeap()
# Build heap with key as freq, value as Node
for byte in range(256):
q.insert(Element(freqs[byte], [byte]))
# Build Huffman tree
for i in range(255): # leave one element
x = q.extractMin()
y = q.extractMin()
freq = x.key + y.key
q.insert(Element(freq, [x.data, y.data]))
# Return root of the tree
return q.extractMin().data
def _createLookupTable(tree):
"""
Create a lookup table for a Huffman tree.
The table (list) maps bytes to a tuple (code, num_of_bits),
where `code` is the compact binary representation,
and `num_of_bits` is the number of bits in the representation.
"""
lookup = [None] * 256
# Function for recursive tree traversal
def recurse(subtree, code, num_of_bits):
if len(subtree) == 1:
# `subtree` is a leaf
lookup[subtree[0]] = (code, num_of_bits)
else:
# Not a leaf, both subtrees must exist
# We are aware that we do not store the huffman codes as strings,
# but this change has been approved by Rolf Fagerberg
recurse(subtree[0], code << 1, num_of_bits + 1) # left => 0
recurse(subtree[1], code << 1 | 1, num_of_bits + 1) # right => 1
# Start recursion
recurse(tree, 0, 0)
return lookup
def compress(input_file, output_file):
"""
Reads `input_file`, applies Huffman compression and writes to `output_file`.
Returns number of bytes read, and number of bytes written to output file.
"""
freqs = [0] * 256
# Not necessary for functionality
bits_written = 1024 * 8 # header size in bits
with open(input_file, "rb") as input_file:
# Count bytes
byte = input_file.read(1)
while byte:
freqs[byte[0]] += 1
byte = input_file.read(1)
tree = Huffman._createHuffmanTree(freqs)
table = Huffman._createLookupTable(tree)
            # Count the number of data bits that will be written
for byte in range(256):
bits_written += table[byte][1] * freqs[byte]
# BitWriter handles padding
with BitWriter(open(output_file, "wb")) as output:
# Write frequency header
for byte in range(256):
output.writeint32bits(freqs[byte])
# Resets the cursor state
input_file.seek(0)
# Encode input file
byte = input_file.read(1)
while byte:
code, bits = table[byte[0]]
byte = input_file.read(1)
# Very similar to `BitWriter._writebits`,
# writes the bits one by one
while bits > 0:
output.writebit((code >> bits-1) & 1)
bits -= 1
# Return bytes read and bytes written
return sum(freqs), (bits_written + 7) // 8
def decompress(input_file, output_file):
"""
Reads `input_file`, applies Huffman decompression and writes to `output_file`.
Returns number of bytes read, and number of bytes written to output file.
"""
# Not necessary for functionality
input_size = os.path.getsize(input_file)
output_length = 0
with BitReader(open(input_file, "rb")) as input_file:
            # Read frequency header
freqs = [input_file.readint32bits() for _ in range(256)]
if not input_file.readsucces():
# not enough data for header
raise Exception("Could not read header (too short)")
# Count output bytes
output_length = sum(freqs)
# Frequency table => Huffman tree
tree = Huffman._createHuffmanTree(freqs)
with open(output_file, "wb") as output:
# Repeat for number of characters in output
for _ in range(output_length):
x = tree
# Traverse tree until a leaf/corresponding byte is found
while len(x) == 2:
bit = input_file.readbit()
if not input_file.readsucces():
raise Exception("Not enough data, unexpected EOF")
x = x[bit] # 0 => left, 1 => right
output.write(bytes(x))
# Return bytes read and bytes written
return input_size, output_length
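# A minimal round-trip sketch (hypothetical file names; assumes the bitIO,
# Element and PQHeap modules imported above are importable). compress() and
# decompress() are defined without `self`, so they are called directly on the
# class:
if __name__ == "__main__":
    bytes_read, bytes_written = Huffman.compress("input.bin", "input.huf")
    print("compressed %d bytes down to %d bytes" % (bytes_read, bytes_written))
    Huffman.decompress("input.huf", "roundtrip.bin")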
|
nilq/baby-python
|
python
|
# InfiniTag Copyright © 2020 AMOS-5
# Permission is hereby granted,
# free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions: The above copyright notice and this
# permission notice shall be included in all copies or substantial portions
# of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
# uses the config from this folder for the general setup
import config
except ImportError:
# we run the testcase / other
pass
import os
import shutil
import pysolr
from pathlib import Path
from urlpath import URL
import json
"""
This file is experimental and was used to set up a local Solr instance.
We have since changed that and set up a remote instance for everybody.
Still, this file contains some useful information on how a Solr core can be
set up remotely.
"""
def get_default_config_dir(solr_home: Path):
return solr_home / "configsets" / "_default" / "conf"
def get_solr_home():
try:
solr_home = Path(os.environ["SOLR_HOME"])
    except KeyError:
raise ValueError(
"You have not set the SOLR_HOME environment variable!\n"
"export SOLR_HOME='SOLR_ROOT/server/solr'"
)
return solr_home
def print_status(result: dict, corename: str):
if result["responseHeader"]["status"] == 0:
print(f"Core with name '{corename}' created.")
else: # we are maybe good (core exists), or error
print(result["error"]["msg"])
def create_admin(url: URL):
admin_url = url / "admin" / "cores"
admin = pysolr.SolrCoreAdmin(admin_url)
return admin
def create_core(config: dict):
corename = config["corename"]
solr_home = get_solr_home()
default_dir = get_default_config_dir(solr_home)
working_dir = solr_home / corename
try:
shutil.copytree(default_dir, working_dir)
except FileExistsError:
# the core has already been created once,
# we don't bother and use the old config
pass
base_url = URL(config["url"])
admin = create_admin(base_url)
# create a core with default configuration
res = admin.create(corename, working_dir)
res = json.loads(res)
print_status(res, corename)
if __name__ == "__main__":
create_core(config.tag_storage)
|
nilq/baby-python
|
python
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements common shared matrix decompositions that are
used to perform gate decompositions.
"""
import numpy as np
from scipy.linalg import block_diag, sqrtm, schur
from thewalrus.symplectic import sympmat
def williamson(V, rtol=1e-05, atol=1e-08):
r"""Williamson decomposition of positive-definite (real) symmetric matrix.
See https://math.stackexchange.com/questions/1171842/finding-the-symplectic-matrix-in-williamsons-theorem/2682630#2682630
and https://strawberryfields.ai/photonics/conventions/decompositions.html#williamson-decomposition
Args:
V (array[float]): positive definite symmetric (real) matrix
rtol (float): the relative tolerance parameter used in ``np.allclose``
atol (float): the absolute tolerance parameter used in ``np.allclose``
Returns:
        tuple[array,array]: ``(Db, S)`` where ``Db`` is a diagonal matrix
            and ``S`` is a symplectic matrix such that :math:`V = S Db S^T`
"""
(n, m) = V.shape
if n != m:
raise ValueError("The input matrix is not square")
if not np.allclose(V, V.T, rtol=rtol, atol=atol):
raise ValueError("The input matrix is not symmetric")
if n % 2 != 0:
raise ValueError("The input matrix must have an even number of rows/columns")
n = n // 2
omega = sympmat(n)
vals = np.linalg.eigvalsh(V)
for val in vals:
if val <= 0:
raise ValueError("Input matrix is not positive definite")
Mm12 = sqrtm(np.linalg.inv(V)).real
r1 = Mm12 @ omega @ Mm12
s1, K = schur(r1)
X = np.array([[0, 1], [1, 0]])
I = np.identity(2)
seq = []
# In what follows I construct a permutation matrix p so that the Schur matrix has
# only positive elements above the diagonal
# Also the Schur matrix uses the x_1,p_1, ..., x_n,p_n ordering thus I permute using perm
# to go to the ordering x_1, ..., x_n, p_1, ... , p_n
for i in range(n):
if s1[2 * i, 2 * i + 1] > 0:
seq.append(I)
else:
seq.append(X)
perm = np.array([2 * i for i in range(n)] + [2 * i + 1 for i in range(n)])
p = block_diag(*seq)
Kt = K @ p
Ktt = Kt[:, perm]
s1t = p @ s1 @ p
dd = [1 / s1t[2 * i, 2 * i + 1] for i in range(n)]
Db = np.diag(dd + dd)
S = Mm12 @ Ktt @ sqrtm(Db)
return Db, np.linalg.inv(S).T
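# A quick numerical sanity check (a sketch; uses only numpy, imported above).
# It draws a random symmetric positive-definite matrix, decomposes it, and
# verifies the symplectic condition plus the reconstruction convention
# V = S @ Db @ S.T satisfied by the matrices returned above.
if __name__ == "__main__":
    n = 3
    A = np.random.rand(2 * n, 2 * n)
    V = A @ A.T + np.identity(2 * n)  # symmetric and positive definite
    Db, S = williamson(V)
    omega = sympmat(n)
    assert np.allclose(S @ omega @ S.T, omega)  # S is symplectic
    assert np.allclose(S @ Db @ S.T, V)         # reconstruction holds
    print("williamson decomposition verified")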
|
nilq/baby-python
|
python
|
import telnetlib
import time
OK = 0
ERROR = 1
RESPONSE_DELAY_MS = 100
class AMXNMX(object):
def __init__(self, host, port=50002, response_delay_ms=RESPONSE_DELAY_MS):
self.conn = telnetlib.Telnet(host, port=port)
self.response_delay_sec = response_delay_ms / 1000.
self._initialize()
def _initialize(self):
pass
def _wait_for_response(self):
time.sleep(self.response_delay_sec)
def _send_command(self, cmd):
self.conn.write(cmd + '\n')
self._wait_for_response()
def _send_command_with_check(self, cmd, key, val):
"""
Send a command and check that the response includes
response_dict[key] == val
"""
r = self._send_command_return_response(cmd)
if r[key] == val:
return OK
else:
return ERROR
def _get_response(self):
raw = self.conn.read_very_eager()
lines = raw.split('\r')[0:-1] #Ignore last empty line
r_dict = {}
for line in lines:
key, val = line.split(':',1)
r_dict[key] = val
return r_dict
def _send_command_return_response(self, cmd):
self._send_command(cmd)
return self._get_response()
def get_status(self):
return self._send_command_return_response("getStatus")
class AMXDecoder(AMXNMX):
def hdmi_off(self):
self._send_command_with_check("hdmiOff", "DVIOFF", "on")
def hdmi_on(self):
self._send_command_with_check("hdmiOn", "DVIOFF", "off")
def set_stream(self, stream):
self._send_command_with_check("set:%d" % stream, "STREAM", "%d" % stream)
class AMXEncoder(AMXNMX):
def _initialize(self):
self.stream_id = int(self.get_status()["STREAM"])
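# A minimal usage sketch (hypothetical device address; assumes an AMX SVSI
# decoder listening for telnet API commands on the default port; note the
# module above is Python 2-era, where telnetlib writes str):
if __name__ == "__main__":
    decoder = AMXDecoder("192.168.1.50")
    print(decoder.get_status())
    decoder.set_stream(5)  # tune the decoder to stream 5
    decoder.hdmi_on()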
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.utils import etcd_util
from urllib3.exceptions import ReadTimeoutError, MaxRetryError
try:
import etcd
HAS_ETCD = True
except ImportError:
HAS_ETCD = False
@skipIf(HAS_ETCD is False, 'python-etcd module must be installed.')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EtcdUtilTestCase(TestCase):
'''
Test cases for salt.utils.etcd_util
'''
# 'get_' function tests: 1
@patch('etcd.Client', autospec=True)
def test_read(self, mock):
'''
Test to make sure we interact with etcd correctly
'''
etcd_client = mock.return_value
etcd_return = MagicMock(value='salt')
etcd_client.read.return_value = etcd_return
client = etcd_util.EtcdClient({})
self.assertEqual(client.read('/salt'), etcd_return)
etcd_client.read.assert_called_with('/salt', recursive=False, wait=False, timeout=None)
client.read('salt', True, True, 10, 5)
etcd_client.read.assert_called_with('salt', recursive=True, wait=True, timeout=10, waitIndex=5)
etcd_client.read.side_effect = etcd.EtcdKeyNotFound
self.assertRaises(etcd.EtcdKeyNotFound, client.read, 'salt')
etcd_client.read.side_effect = etcd.EtcdConnectionFailed
self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt')
etcd_client.read.side_effect = etcd.EtcdValueError
self.assertRaises(etcd.EtcdValueError, client.read, 'salt')
etcd_client.read.side_effect = ValueError
self.assertRaises(ValueError, client.read, 'salt')
etcd_client.read.side_effect = ReadTimeoutError(None, None, None)
self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt')
etcd_client.read.side_effect = MaxRetryError(None, None)
self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt')
@patch('etcd.Client')
def test_get(self, mock):
'''
        Test that it gets a value from etcd, by direct path
'''
client = etcd_util.EtcdClient({})
with patch.object(client, 'read', autospec=True) as mock:
mock.return_value = MagicMock(value='stack')
self.assertEqual(client.get('salt'), 'stack')
mock.assert_called_with('salt', recursive=False)
self.assertEqual(client.get('salt', recurse=True), 'stack')
mock.assert_called_with('salt', recursive=True)
mock.side_effect = etcd.EtcdKeyNotFound()
self.assertEqual(client.get('not-found'), None)
mock.side_effect = etcd.EtcdConnectionFailed()
self.assertEqual(client.get('watching'), None)
# python 2.6 test
mock.side_effect = ValueError
self.assertEqual(client.get('not-found'), None)
mock.side_effect = Exception
self.assertRaises(Exception, client.get, 'some-error')
@patch('etcd.Client')
def test_tree(self, mock):
'''
Test recursive gets
'''
client = etcd_util.EtcdClient({})
with patch.object(client, 'read', autospec=True) as mock:
c1, c2 = MagicMock(), MagicMock()
c1.__iter__.return_value = [
MagicMock(key='/x/a', value='1'),
MagicMock(key='/x/b', value='2'),
MagicMock(key='/x/c', dir=True)]
c2.__iter__.return_value = [
MagicMock(key='/x/c/d', value='3')
]
mock.side_effect = iter([
MagicMock(children=c1),
MagicMock(children=c2)
])
self.assertDictEqual(client.tree('/x'), {'a': '1', 'b': '2', 'c': {'d': '3'}})
mock.assert_any_call('/x')
mock.assert_any_call('/x/c')
mock.side_effect = etcd.EtcdKeyNotFound()
self.assertEqual(client.tree('not-found'), None)
mock.side_effect = ValueError
self.assertEqual(client.tree('/x'), None)
mock.side_effect = Exception
self.assertRaises(Exception, client.tree, 'some-error')
@patch('etcd.Client')
def test_ls(self, mock):
client = etcd_util.EtcdClient({})
with patch.object(client, 'read', autospec=True) as mock:
c1 = MagicMock()
c1.__iter__.return_value = [
MagicMock(key='/x/a', value='1'),
MagicMock(key='/x/b', value='2'),
MagicMock(key='/x/c', dir=True)]
mock.return_value = MagicMock(children=c1)
self.assertEqual(client.ls('/x'), {'/x': {'/x/a': '1', '/x/b': '2', '/x/c/': {}}})
mock.assert_called_with('/x')
mock.side_effect = etcd.EtcdKeyNotFound()
self.assertEqual(client.ls('/not-found'), {})
mock.side_effect = Exception
self.assertRaises(Exception, client.tree, 'some-error')
@patch('etcd.Client', autospec=True)
def test_write(self, mock):
client = etcd_util.EtcdClient({})
etcd_client = mock.return_value
etcd_client.write.return_value = MagicMock(value='salt')
self.assertEqual(client.write('/some-key', 'salt'), 'salt')
etcd_client.write.assert_called_with('/some-key', 'salt', ttl=None, dir=False)
self.assertEqual(client.write('/some-key', 'salt', ttl=5), 'salt')
etcd_client.write.assert_called_with('/some-key', 'salt', ttl=5, dir=False)
etcd_client.write.return_value = MagicMock(dir=True)
self.assertEqual(client.write('/some-dir', 'salt', ttl=0, directory=True), True)
etcd_client.write.assert_called_with('/some-dir', None, ttl=0, dir=True)
etcd_client.write.side_effect = etcd.EtcdRootReadOnly()
self.assertEqual(client.write('/', 'some-val'), None)
etcd_client.write.side_effect = etcd.EtcdNotFile()
self.assertEqual(client.write('/some-key', 'some-val'), None)
etcd_client.write.side_effect = etcd.EtcdNotDir()
self.assertEqual(client.write('/some-dir', 'some-val'), None)
etcd_client.write.side_effect = MaxRetryError(None, None)
self.assertEqual(client.write('/some-key', 'some-val'), None)
etcd_client.write.side_effect = ValueError
self.assertEqual(client.write('/some-key', 'some-val'), None)
etcd_client.write.side_effect = Exception
self.assertRaises(Exception, client.set, 'some-key', 'some-val')
@patch('etcd.Client', autospec=True)
def test_flatten(self, mock):
client = etcd_util.EtcdClient({})
some_data = {
'/x/y/a': '1',
'x': {
'y': {
'b': '2'
}
},
'm/j/': '3',
'z': '4',
'd': {},
}
result_path = {
'/test/x/y/a': '1',
'/test/x/y/b': '2',
'/test/m/j': '3',
'/test/z': '4',
'/test/d': {},
}
result_nopath = {
'/x/y/a': '1',
'/x/y/b': '2',
'/m/j': '3',
'/z': '4',
'/d': {},
}
result_root = {
'/x/y/a': '1',
'/x/y/b': '2',
'/m/j': '3',
'/z': '4',
'/d': {},
}
self.assertEqual(client._flatten(some_data, path='/test'), result_path)
self.assertEqual(client._flatten(some_data, path='/'), result_root)
self.assertEqual(client._flatten(some_data), result_nopath)
@patch('etcd.Client', autospec=True)
def test_update(self, mock):
client = etcd_util.EtcdClient({})
some_data = {
'/x/y/a': '1',
'x': {
'y': {
'b': '3'
}
},
'm/j/': '3',
'z': '4',
'd': {},
}
result = {
'/test/x/y/a': '1',
'/test/x/y/b': '2',
'/test/m/j': '3',
'/test/z': '4',
'/test/d': True,
}
flatten_result = {
'/test/x/y/a': '1',
'/test/x/y/b': '2',
'/test/m/j': '3',
'/test/z': '4',
'/test/d': {}
}
client._flatten = MagicMock(return_value=flatten_result)
self.assertEqual(client.update('/some/key', path='/blah'), None)
with patch.object(client, 'write', autospec=True) as write_mock:
def write_return(key, val, ttl=None, directory=None):
return result.get(key, None)
write_mock.side_effect = write_return
self.assertDictEqual(client.update(some_data, path='/test'), result)
client._flatten.assert_called_with(some_data, '/test')
self.assertEqual(write_mock.call_count, 5)
@patch('etcd.Client', autospec=True)
def test_rm(self, mock):
etcd_client = mock.return_value
client = etcd_util.EtcdClient({})
etcd_client.delete.return_value = True
self.assertEqual(client.rm('/some-key'), True)
etcd_client.delete.assert_called_with('/some-key', recursive=False)
self.assertEqual(client.rm('/some-dir', recurse=True), True)
etcd_client.delete.assert_called_with('/some-dir', recursive=True)
etcd_client.delete.side_effect = etcd.EtcdNotFile()
self.assertEqual(client.rm('/some-dir'), None)
etcd_client.delete.side_effect = etcd.EtcdDirNotEmpty()
self.assertEqual(client.rm('/some-key'), None)
etcd_client.delete.side_effect = etcd.EtcdRootReadOnly()
self.assertEqual(client.rm('/'), None)
etcd_client.delete.side_effect = ValueError
self.assertEqual(client.rm('/some-dir'), None)
etcd_client.delete.side_effect = Exception
self.assertRaises(Exception, client.rm, 'some-dir')
@patch('etcd.Client', autospec=True)
def test_watch(self, client_mock):
client = etcd_util.EtcdClient({})
with patch.object(client, 'read', autospec=True) as mock:
mock.return_value = MagicMock(value='stack', key='/some-key', modifiedIndex=1, dir=False)
self.assertDictEqual(client.watch('/some-key'),
{'value': 'stack', 'key': '/some-key', 'mIndex': 1, 'changed': True, 'dir': False})
mock.assert_called_with('/some-key', wait=True, recursive=False, timeout=0, waitIndex=None)
mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, mock.return_value])
self.assertDictEqual(client.watch('/some-key'),
{'value': 'stack', 'changed': False, 'mIndex': 1, 'key': '/some-key', 'dir': False})
mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, etcd.EtcdKeyNotFound])
self.assertEqual(client.watch('/some-key'),
{'value': None, 'changed': False, 'mIndex': 0, 'key': '/some-key', 'dir': False})
mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, ValueError])
self.assertEqual(client.watch('/some-key'), {})
mock.side_effect = None
mock.return_value = MagicMock(value='stack', key='/some-key', modifiedIndex=1, dir=True)
self.assertDictEqual(client.watch('/some-dir', recurse=True, timeout=5, index=10),
{'value': 'stack', 'key': '/some-key', 'mIndex': 1, 'changed': True, 'dir': True})
mock.assert_called_with('/some-dir', wait=True, recursive=True, timeout=5, waitIndex=10)
mock.side_effect = MaxRetryError(None, None)
self.assertEqual(client.watch('/some-key'), {})
mock.side_effect = etcd.EtcdConnectionFailed()
self.assertEqual(client.watch('/some-key'), {})
mock.return_value = None
self.assertEqual(client.watch('/some-key'), {})
if __name__ == '__main__':
from integration import run_tests
run_tests(EtcdUtilTestCase, needs_daemon=False)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
A really simple module, just to demonstrate distutils
"""
def capitalize(infilename, outfilename):
"""
reads the contents of infilename, and writes it to outfilename, but with
every word capitalized
note: very primitive -- it will mess some files up!
this is called by the capitalize script
"""
infile = open(infilename, 'U')
outfile = open(outfilename, 'w')
for line in infile:
outfile.write( " ".join( [word.capitalize() for word in line.split() ] ) )
outfile.write("\n")
return None
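# A minimal usage sketch (hypothetical file names):
if __name__ == "__main__":
    capitalize("input.txt", "output.txt")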
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ utility/helper.py ]
# Synopsis [ helper functions ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
#####################
# PARSE PRUNE HEADS #
#####################
def parse_prune_heads(config):
if 'prune_headids' in config['transformer'] and config['transformer']['prune_headids'] != 'None':
heads_int = []
spans = config['transformer']['prune_headids'].split(',')
for span in spans:
endpoints = span.split('-')
if len(endpoints) == 1:
heads_int.append(int(endpoints[0]))
elif len(endpoints) == 2:
heads_int += torch.arange(int(endpoints[0]), int(endpoints[1])).tolist()
else:
raise ValueError
print(f'[PRUNING] - heads {heads_int} will be pruned')
config['transformer']['prune_headids'] = heads_int
else:
config['transformer']['prune_headids'] = None
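# Example of the accepted config format (hypothetical config dict). A span
# such as '2-5' is expanded with torch.arange and is therefore end-exclusive:
#
#     config = {'transformer': {'prune_headids': '0,2-5,7'}}
#     parse_prune_heads(config)
#     config['transformer']['prune_headids']  # -> [0, 2, 3, 4, 7]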
##########################
# GET TRANSFORMER TESTER #
##########################
def get_transformer_tester(from_path='result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/model-1000000.ckpt', display_settings=False):
''' Wrapper that loads the transformer model from checkpoint path '''
# load config and paras
all_states = torch.load(from_path, map_location='cpu')
config = all_states['Settings']['Config']
paras = all_states['Settings']['Paras']
# handling older checkpoints
if not hasattr(paras, 'multi_gpu'):
setattr(paras, 'multi_gpu', False)
if 'prune_headids' not in config['transformer']:
config['transformer']['prune_headids'] = None
# display checkpoint settings
if display_settings:
for cluster in config:
print(cluster + ':')
for item in config[cluster]:
print('\t' + str(item) + ': ', config[cluster][item])
print('paras:')
v_paras = vars(paras)
for item in v_paras:
print('\t' + str(item) + ': ', v_paras[item])
# load model with Tester
from transformer.solver import Tester
tester = Tester(config, paras)
tester.set_model(inference=True, with_head=False, from_path=from_path)
return tester
|
nilq/baby-python
|
python
|
# module msysio.py
# Requires Python 2.2 or better.
"""Provide helpful routines for interactive IO on the MSYS console"""
# Output needs to be flushed to be seen. It is especially important
# when prompting for user input.
import sys
import os
__all__ = ['raw_input_', 'print_', 'is_msys']
# 2.x/3.x compatibility stuff
try:
raw_input
except NameError:
raw_input = input
# Exported functions
def raw_input_(prompt=None):
"""Prompt for user input in an MSYS console friendly way"""
if prompt is None:
prompt = ''
print_(prompt, end='')
return raw_input()
def print_(*args, **kwds):
"""Print arguments in an MSYS console friendly way
Keyword arguments:
file, sep, end
"""
stream = kwds.get('file', sys.stdout)
sep = kwds.get('sep', ' ')
end = kwds.get('end', '\n')
if args:
stream.write(sep.join([str(arg) for arg in args]))
if end:
stream.write(end)
try:
stream.flush()
except AttributeError:
pass
def is_msys():
"""Return true if the execution environment is MSYS"""
try:
# Unfortunately there is no longer an MSYS specific identifier.
return os.environ['TERM'] == 'cygwin'
except KeyError:
return False
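# A minimal usage sketch (kept Python 2.2 compatible, like the module itself):
if __name__ == '__main__':
    name = raw_input_('Name? ')
    print_('Hello,', name)
    if is_msys():
        print_('Running on an MSYS console')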
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
try:
here = __file__
except NameError:
# Python 2.2
here = sys.argv[0]
relative_paste = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(here))), 'paste')
if os.path.exists(relative_paste):
sys.path.insert(0, os.path.dirname(relative_paste))
from paste.script import command
command.run()
|
nilq/baby-python
|
python
|
from practicum import find_mcu_boards, McuBoard, PeriBoard
from flask import Flask, Response, jsonify, request
from flask_cors import CORS
import json
import threading
app = Flask(__name__)
CORS(app)
def ReadScore():
filename = "score.json"
with open(filename) as file:
data = json.load(file)
return data
@app.route('/scoreboard')
def Scoreboard():
scoreBoard = ReadScore()
return jsonify(scoreBoard)
|
nilq/baby-python
|
python
|
from python_kemptech_api import *
# Specify the LoadMaster connection credentials here:
loadmaster_ip = ""
username = ""
password = ""
lm = LoadMaster(loadmaster_ip, username, password)
# Specify the VS parameters:
vs_ip = ""
new_vs = ""
vs_port = ""
template_file = "template.txt"
# Create the VS
vs = lm.create_virtual_service(vs_ip, vs_port)
vs.save()
# Customize your VS here
vs.transparent = 'y'
vs.sslacceleration = 'y'
vs.update()
# Export the VS as a template and write to a file
template_content = vs.export()
with open(template_file, 'w') as f:
f.write(template_content)
# Upload template file to LoadMaster
lm.upload_template(template_file)
# Get template name and object
template_name, template_obj = lm.templates.popitem()
# Apply the template to a new VS
lm.apply_template(new_vs, vs_port, "tcp", template_name=template_name, nickname="VS from Template")
|
nilq/baby-python
|
python
|
"""
Title: Mammogram Mass Detector
Author: David Sternheim
Description:
The purpose of this script is to take data regarding masses detected in mammograms and use machine learning
models to predict whether a given mass is malignant or benign. The data is taken from UCI public data sets.
Breakdown of the data set:
The data has 961 instances of masses detected in mammograms. It's stored in mammographic_masses.data.txt.
The format of the file is comma-separated values with each of the following as one of the values, in order:
1. BI-RADS Assessment: 1 to 5 (ordinal)
2. Age: patient's age in years (integer)
3. Shape: mass shape: round=1 oval=2 lobular=3 irregular=4 (nominal)
4. Margin: mass margin: circumscribed=1 microlobulated=2 obscured=3 ill-defined=4 spiculated=5 (nominal)
5. Density: mass density high=1 iso=2 low=3 fat-containing=4 (ordinal)
6. Severity: benign=0 malignant=1
NOTE: '?' denotes a missing data value
Last Updated: 09/15/18
Known Bugs:
"""
import pandas as pd
from sklearn import tree
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.linear_model import LogisticRegression
"""
Reading in the data and pre-processing it.
"""
data = pd.read_csv('Assets/mammographic_masses.data.txt')
df = pd.DataFrame(data)
df.columns = ['BIRADS', 'Age', 'Shape', 'Margin', 'Density', 'Severity']
print(df.head())
d = {'1': 1.0, '2': 2.0, '3': 3.0, '4': 4.0, '5': 5.0, '?': -1.0}
df['BIRADS'] = df['BIRADS'].map(d)
df['Shape'] = df['Shape'].map(d)
df['Margin'] = df['Margin'].map(d)
df['Density'] = df['Density'].map(d)
df['Age'] = pd.to_numeric(df['Age'], errors='coerce')
df['Severity'] = pd.to_numeric(df['Severity'], errors='coerce')
df.fillna(-1.0, inplace=True)
df = df.astype('float32')
print(type(df['Severity'][0]))
"""
Implement Decision Tree. Trained with K-Folds Cross Validation with K=10
"""
y = df['Severity']
features = list(df.columns[:5])
x = df[features]
x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=.4, random_state=0)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x_train, y_train)
score = clf.score(x_test, y_test)
scores = model_selection.cross_val_score(clf, x, y, cv=10)
print('Decision Tree accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~76% accuracy
# Random Forests
clf = RandomForestClassifier(n_estimators=10)
clf = clf.fit(x_train, y_train)
score = clf.score(x_test, y_test)
scores = model_selection.cross_val_score(clf, x, y, cv=10)
print('Random Forest accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~78% accuracy
"""
Implement K-Nearest Neighbors. Trained with K-Folds Cross validation with K=10
"""
scaler = StandardScaler()
scaler = scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
clf = KNeighborsClassifier(n_neighbors=5)
clf = clf.fit(x_train, y_train)
score = clf.score(x_test, y_test)
scores = model_selection.cross_val_score(clf, x, y, cv=10)
print('K-Nearest Neighbor accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79%
"""
Implement Naive Bayes. Trained with K-Folds Cross Validation with K=10
"""
clf = GaussianNB()
clf = clf.fit(x_train, y_train)
scores = model_selection.cross_val_score(clf, x, y, cv=10)
print('Naive Bayes accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~78%
"""
Implement Support Vector Machine
"""
C = 1.0
svc = svm.SVC(kernel='linear', C=C).fit(x_train, y_train)
scores = model_selection.cross_val_score(svc, x, y, cv=10)
print('Support Vector Machine accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79%
"""
Implement Logistic Regression. Trained with K-Folds Cross Validation.
"""
lgr = LogisticRegression()
lgr = lgr.fit(x_train, y_train)
scores = model_selection.cross_val_score(lgr, x, y, cv=10)
print('Logistic Regression accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79%
"""
Conclusions: Most machine learning models reach an accuracy of around 79%. Decision trees are by far the worst model
for detecting whether a mass is malignant or benign, since the test returned a result of around 76%. Any of the other
models can be used with comparable accuracy of ~79%. The highest accuracy came from KNN at a high 79%. By tuning the
hyperparameters, the models may be improved.
"""
|
nilq/baby-python
|
python
|
import os
path = '/content/Multilingual_Text_to_Speech/checkpoints'
files = sorted(os.listdir(path))
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
tfkl = tf.keras.layers
def array2tensor(z, dtype=tf.float32):
"""Converts numpy arrays into tensorflow tensors.
Keyword arguments:
z -- numpy array
dtype -- data type of tensor entries (default float32)
"""
if len(np.shape(z)) == 1: # special case where input is a vector
return tf.cast(np.reshape(z, (np.shape(z)[0], 1)), dtype)
else:
return tf.cast(z, dtype)
def reduce_logmeanexp_offdiag(x, axis=None):
"""Contracts the tensor x on its off-diagonal elements and takes the logarithm.
Keyword arguments:
x -- tensorflow tensor
axis (int) -- contraction axis (default None)
if axis=None, does full contraction
:Authors:
Ben Poole
Copyright 2019 Google LLC.
"""
num_samples = x.shape[0].value
if axis:
        log_num_elem = tf.math.log(float(num_samples - 1))  # cast: log needs a float
else:
        log_num_elem = tf.math.log(float(num_samples * (num_samples - 1)))
return tf.reduce_logsumexp(x - tf.linalg.tensor_diag(np.inf * tf.ones(num_samples)), axis=axis)\
- log_num_elem
def const_fn(x, const=1.0):
"""Function mapping any argument to a constant float value.
Keyword arguments:
x -- dummy argument
const (float) -- constant value of the image
"""
return const
def mlp(hidden_dim, output_dim, layers, activation):
"""Constructs multi-layer perceptron (MLP) critic with given number of hidden layers.
Keyword arguments:
hidden_dim (int) -- dimensionality of hidden dense layers
output_dim (int) -- dimensionality of the output tensor
layers (int) -- number of hidden dense layers
activation -- activation function of the neurons
"""
return tf.keras.Sequential(
[tfkl.Dense(hidden_dim, activation) for _ in range(layers)] +
[tfkl.Dense(output_dim)])
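# A minimal usage sketch: a two-hidden-layer ReLU critic that assigns one
# scalar score per input sample (the shapes here are illustrative only).
if __name__ == '__main__':
    critic = mlp(hidden_dim=256, output_dim=1, layers=2, activation='relu')
    scores = critic(array2tensor(np.random.randn(8, 4)))  # shape (8, 1)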
|
nilq/baby-python
|
python
|
# To run all the tests, run: python -m unittest in the terminal in the project directory.
from os.path import dirname, basename, isfile, join
import glob
# makes the modules easily loadable
modules = glob.glob(join(dirname(__file__), "*.py"))
__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Send events based on a script's stdout
.. versionadded:: Neon
Example Config
.. code-block:: yaml
engines:
- script:
cmd: /some/script.py -a 1 -b 2
output: json
interval: 5
Script engine configs:
cmd: Script or command to execute
output: Any available saltstack deserializer
interval: How often in seconds to execute the command
'''
from __future__ import absolute_import, print_function
import logging
import shlex
import time
import subprocess
# import salt libs
import salt.utils.event
import salt.utils.process
import salt.loader
from salt.exceptions import CommandExecutionError
from salt.ext import six
log = logging.getLogger(__name__)
def _read_stdout(proc):
'''
Generator that returns stdout
'''
for line in iter(proc.stdout.readline, ""):
yield line
def _get_serializer(output):
'''
    Helper to return a known serializer based on
    the passed output argument
'''
serializers = salt.loader.serializers(__opts__)
try:
return getattr(serializers, output)
except AttributeError:
raise CommandExecutionError(
"Unknown serializer '{0}' found for output option".format(output)
)
def start(cmd, output='json', interval=1):
'''
Parse stdout of a command and generate an event
    The script engine will scrape stdout of the
    given script and generate an event based on the
    presence of the 'tag' key and its value.
If there is a data obj available, that will also
be fired along with the tag.
Example:
Given the following json output from a script:
.. code-block:: json
{ "tag" : "lots/of/tacos",
"data" : { "toppings" : "cilantro" }
}
This will fire the event 'lots/of/tacos'
on the event bus with the data obj as is.
:param cmd: The command to execute
:param output: How to deserialize stdout of the script
:param interval: How often to execute the script
'''
try:
cmd = shlex.split(cmd)
except AttributeError:
cmd = shlex.split(six.text_type(cmd))
log.debug("script engine using command %s", cmd)
serializer = _get_serializer(output)
if __opts__.get('__role') == 'master':
fire_master = salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir']).fire_event
else:
fire_master = __salt__['event.send']
while True:
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.debug("Starting script with pid %d", proc.pid)
for raw_event in _read_stdout(proc):
log.debug(raw_event)
event = serializer.deserialize(raw_event)
tag = event.get('tag', None)
data = event.get('data', {})
if data and 'id' not in data:
data['id'] = __opts__['id']
if tag:
log.info("script engine firing event with tag %s", tag)
fire_master(tag=tag, data=data)
log.debug("Closing script with pid %d", proc.pid)
proc.stdout.close()
rc = proc.wait()
if rc:
raise subprocess.CalledProcessError(rc, cmd)
except subprocess.CalledProcessError as e:
log.error(e)
finally:
            if proc.poll() is None:
proc.terminate()
time.sleep(interval)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import logging
from google.protobuf.descriptor import Descriptor, FieldDescriptor
from dremel.consts import *
from dremel.node import Node, CompositeNode
from dremel.field_graph import FieldNode, FieldGraph
from dremel.schema_pb2 import Schema, SchemaFieldDescriptor, SchemaFieldGraph
class DissectError(Exception):
""" Exception type in this package. """
pass
class FieldMixin(object):
def __init__(self, path, desc,
max_repetition_level=0,
definition_level=0):
super().__init__()
self._path = path
self._desc = desc
self._max_repetition_level = max_repetition_level
self._definition_level = definition_level
@property
def path(self):
return self._path
@property
def field_descriptor(self):
return self._desc
@property
def max_repetition_level(self):
return self._max_repetition_level
@property
def definition_level(self):
return self._definition_level
class FieldWriter(FieldMixin, Node):
def __init__(self, path, desc,
max_repetition_level=0,
definition_level=0,
write_callback=None):
super().__init__(path, desc, max_repetition_level, definition_level)
self._write_callback = write_callback
def set_write_callback(self, callback):
# setup custom callbacks
self._write_callback = callback
def __repr__(self):
return f'<Field: {self.path} R={self.max_repetition_level} D={self.definition_level}>'
def accept(self, r, d, msg, visitor):
if msg is None:
self._accept(r, d, None, visitor)
return
        # NOTE(me): Here `msg` is the enclosing message holding the values; in
        # C++ this would make field-type dispatching more convenient.
label = self._desc.label
field_name = self._desc.name
if label == FieldDescriptor.LABEL_REQUIRED:
assert msg.HasField(field_name), f"Missing required field: {field_name}"
self._accept(r, d, getattr(msg, field_name), visitor)
elif label == FieldDescriptor.LABEL_OPTIONAL:
has_val = msg.HasField(field_name)
local_d = d+1 if has_val else d
val = getattr(msg, field_name) if has_val else None
self._accept(r, local_d, val, visitor)
elif label == FieldDescriptor.LABEL_REPEATED:
vals = getattr(msg, field_name)
if len(vals) == 0:
self._accept(r, d, None, visitor)
else:
local_r = r
for val in vals:
self._accept(local_r, d+1, val, visitor)
local_r = self._max_repetition_level
else:
raise DissectError("Invalid field label: {}".format(str(self._desc)))
def _accept(self, r, d, v, visitor):
visitor(self, r, d, v)
class MessageWriter(FieldWriter, CompositeNode):
def __init__(self, path, desc,
max_repetition_level=0,
definition_level=0):
super().__init__(path, desc, max_repetition_level, definition_level)
self._field_graph = None
def __repr__(self):
return f'<Message: {self.path} R={self.max_repetition_level} D={self.definition_level}>'
@property
def field_graph(self):
if self._field_graph is None:
self._field_graph = self._init_field_graph()
return self._field_graph
def _init_field_graph(self):
def _(node):
desc = SchemaFieldDescriptor(
path=node.path,
cpp_type=node.field_descriptor.cpp_type if node.field_descriptor else None,
label=node.field_descriptor.label if node.field_descriptor else None,
max_repetition_level=node.max_repetition_level,
definition_level=node.definition_level)
current = FieldNode(desc)
for child in getattr(node, 'child_nodes', []):
child_node = _(child)
current.add_child(child_node)
return current
root = _(self)
return FieldGraph(root)
def accept(self, r, d, msg, visitor):
if self.is_root():
# root msg has no outer scopes, so we should treat it specially.
self._accept(r, d, msg, visitor)
else:
super().accept(r, d, msg, visitor)
def _accept(self, r, d, v, visitor):
for child in self.child_nodes:
child.accept(r, d, v, visitor)
def write(self, msg):
if not self.is_root():
            raise DissectError('cannot write from non-root nodes')
def visitor(node, r, d, v):
if node._write_callback:
node._write_callback(node, r, d, v)
self.accept(0, 0, msg, visitor)
def _get_valid_paths(fields):
""" Generate all possible field paths which are traversable. """
if fields is None or len(fields) == 0:
return None
m = dict()
for field in fields:
current = ROOT
segs = field.split('.')
for i, seg in enumerate(segs):
current += f'.{seg}'
leaf = (i+1 == len(segs))
if current in m:
if m[current] != leaf:
raise DissectError(f'Found an intermediate node conflicted: {current}')
else:
m[current] = leaf
return m
def _recurse_create_nodes(msg_desc, node, valid_paths, circuit_checks):
""" Create nodes recursively. """
if msg_desc.name in circuit_checks:
raise DissectError(f'Found recursive message definition: {msg_desc.name}')
circuit_checks.add(msg_desc.name)
for field in msg_desc.fields:
path = f'{node.path}.{field.name}'
if valid_paths is not None and path not in valid_paths:
logging.debug('invalid path: %s', path)
continue
max_repetition_level = node.max_repetition_level
definition_level = node.definition_level
if field.label == FieldDescriptor.LABEL_OPTIONAL:
definition_level += 1
elif field.label == FieldDescriptor.LABEL_REPEATED:
definition_level += 1
max_repetition_level += 1
if field.type in (FieldDescriptor.TYPE_GROUP, FieldDescriptor.TYPE_MESSAGE):
child = MessageWriter(path, field, max_repetition_level, definition_level)
_recurse_create_nodes(field.message_type, child, valid_paths, circuit_checks)
else:
child = FieldWriter(path, field, max_repetition_level, definition_level)
logging.debug('create field writer: %s', path)
node.add_child(child)
circuit_checks.remove(msg_desc.name)
def _prune(node):
""" Remove unused message nodes. """
while node.parent is not None:
parent = node.parent
parent.remove_child(node)
logging.info('prune node: %s', node.path)
if len(parent.child_nodes) == 0:
node = parent
else:
break
def new_message_writer(msg_desc, fields=None):
valid_paths = _get_valid_paths(fields)
writer = MessageWriter(ROOT, None)
_recurse_create_nodes(msg_desc, writer, valid_paths, set())
    # prune message nodes left without any leaf fields
dead_nodes = []
def _(node):
if isinstance(node, MessageWriter) and len(node.child_nodes) == 0:
dead_nodes.append(node)
writer.node_accept(_)
for node in dead_nodes:
_prune(node)
if len(writer.child_nodes) == 0:
raise DissectError(f'No valid leaf fields in root writer, chosen: {fields}')
return writer
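# A minimal usage sketch (hypothetical generated protobuf module `doc_pb2`):
# build a writer over two columns and print one (path, repetition level,
# definition level, value) striping tuple per leaf write, Dremel-style.
if __name__ == '__main__':
    from doc_pb2 import Document  # hypothetical message with doc_id and name.url
    writer = new_message_writer(Document.DESCRIPTOR, fields=['doc_id', 'name.url'])

    def dump(node, r, d, v):
        print(node.path, r, d, v)

    def attach(node):
        if not isinstance(node, MessageWriter):
            node.set_write_callback(dump)

    writer.node_accept(attach)
    writer.write(Document(doc_id=10))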
|
nilq/baby-python
|
python
|
from ptcaccount2.accounts import random_account
|
nilq/baby-python
|
python
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(Path(__file__).resolve().parent.parent)
import test_util as tu
def test_matmul_basic(op_tester):
d1 = np.random.rand(2, 3).astype(np.float32)
d2 = np.random.rand(3, 4).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.matmul([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.matmul(d1, d2)
return [out]
op_tester.run(init_builder, reference)
|
nilq/baby-python
|
python
|
from urllib.parse import urljoin
from uuid import UUID
import pytest
import reversion
from django.conf import settings
from django.utils.timezone import now
from freezegun import freeze_time
from requests.exceptions import (
ConnectionError,
ConnectTimeout,
ReadTimeout,
Timeout,
)
from rest_framework import serializers, status
from reversion.models import Version
from datahub.company.models import Company
from datahub.company.test.factories import AdviserFactory, CompanyFactory
from datahub.dnb_api.constants import ALL_DNB_UPDATED_MODEL_FIELDS
from datahub.dnb_api.test.utils import model_to_dict_company
from datahub.dnb_api.utils import (
DNBServiceConnectionError,
DNBServiceError,
DNBServiceInvalidRequest,
DNBServiceInvalidResponse,
DNBServiceTimeoutError,
format_dnb_company,
get_company,
get_company_update_page,
RevisionNotFoundError,
rollback_dnb_company_update,
update_company_from_dnb,
)
from datahub.metadata.models import Country
pytestmark = pytest.mark.django_db
DNB_SEARCH_URL = urljoin(f'{settings.DNB_SERVICE_BASE_URL}/', 'companies/search/')
DNB_UPDATES_URL = urljoin(f'{settings.DNB_SERVICE_BASE_URL}/', 'companies/')
@pytest.mark.parametrize(
'dnb_response_status',
(
status.HTTP_400_BAD_REQUEST,
status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN,
status.HTTP_404_NOT_FOUND,
status.HTTP_405_METHOD_NOT_ALLOWED,
status.HTTP_500_INTERNAL_SERVER_ERROR,
),
)
def test_get_company_dnb_service_error(
caplog,
requests_mock,
dnb_response_status,
):
"""
    Test that if the dnb-service returns a status code that is not
    200, we log it and raise an exception with an appropriate
    message.
"""
requests_mock.post(
DNB_SEARCH_URL,
status_code=dnb_response_status,
)
with pytest.raises(DNBServiceError) as e:
get_company('123456789')
expected_message = f'DNB service returned an error status: {dnb_response_status}'
assert e.value.args[0] == expected_message
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == expected_message
@pytest.mark.parametrize(
'request_exception,expected_exception,expected_message',
(
(
ConnectionError,
DNBServiceConnectionError,
'Encountered an error connecting to DNB service',
),
(
ConnectTimeout,
DNBServiceConnectionError,
'Encountered an error connecting to DNB service',
),
(
Timeout,
DNBServiceTimeoutError,
'Encountered a timeout interacting with DNB service',
),
(
ReadTimeout,
DNBServiceTimeoutError,
'Encountered a timeout interacting with DNB service',
),
),
)
def test_get_company_dnb_service_request_error(
caplog,
requests_mock,
request_exception,
expected_exception,
expected_message,
):
"""
    Test that if there is an error connecting to dnb-service, we log it and raise an exception with an
    appropriate message.
"""
requests_mock.post(
DNB_SEARCH_URL,
exc=request_exception,
)
with pytest.raises(expected_exception) as e:
get_company('123456789')
assert e.value.args[0] == expected_message
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == expected_message
@pytest.mark.parametrize(
'search_results, expected_exception, expected_message',
(
(
[],
DNBServiceInvalidRequest,
'Cannot find a company with duns_number: 123456789',
),
(
['foo', 'bar'],
DNBServiceInvalidResponse,
'Multiple companies found with duns_number: 123456789',
),
(
[{'duns_number': '012345678'}],
DNBServiceInvalidResponse,
'DUNS number of the company: 012345678 '
'did not match searched DUNS number: 123456789',
),
),
)
def test_get_company_invalid_request_response(
caplog,
requests_mock,
search_results,
expected_exception,
expected_message,
):
"""
    Test that if a given `duns_number` returns anything other than a single company
    from dnb-service, the get_company function raises an exception.
"""
requests_mock.post(
DNB_SEARCH_URL,
json={'results': search_results},
)
with pytest.raises(expected_exception) as e:
get_company('123456789')
assert e.value.args[0] == expected_message
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == expected_message
def test_get_company_valid(
caplog,
requests_mock,
dnb_response_uk,
):
"""
    Test that if dnb-service returns a valid response, get_company
    returns a formatted dict.
"""
requests_mock.post(
DNB_SEARCH_URL,
json=dnb_response_uk,
)
dnb_company = get_company('123456789')
assert dnb_company == {
'company_number': '01261539',
'name': 'FOO BICYCLE LIMITED',
'duns_number': '123456789',
'trading_names': [],
'address': {
'country': UUID('80756b9a-5d95-e211-a939-e4115bead28a'),
'county': '',
'line_1': 'Unit 10, Ockham Drive',
'line_2': '',
'postcode': 'UB6 0F2',
'town': 'GREENFORD',
},
'registered_address': {
'country': UUID('80756b9a-5d95-e211-a939-e4115bead28a'),
'county': '',
'line_1': 'C/O LONE VARY',
'line_2': '',
'postcode': 'UB6 0F2',
'town': 'GREENFORD',
},
'number_of_employees': 260,
'is_number_of_employees_estimated': True,
'turnover': 50651895.0,
'is_turnover_estimated': None,
'uk_based': True,
'website': 'http://foo.com',
'global_ultimate_duns_number': '291332174',
}
class TestUpdateCompanyFromDNB:
"""
Test update_company_from_dnb utility function.
"""
@pytest.mark.parametrize(
'adviser_callable',
(
lambda: None,
lambda: AdviserFactory(),
),
)
@pytest.mark.parametrize(
'update_descriptor',
(
None,
'automatic',
),
)
@freeze_time('2019-01-01 11:12:13')
def test_update_company_from_dnb_all_fields(
self,
formatted_dnb_company,
adviser_callable,
update_descriptor,
):
"""
Test that update_company_from_dnb will update all fields when the fields
kwarg is not specified.
"""
duns_number = '123456789'
company = CompanyFactory(duns_number=duns_number, pending_dnb_investigation=True)
original_company = Company.objects.get(id=company.id)
adviser = adviser_callable()
update_company_from_dnb(
company,
formatted_dnb_company,
user=adviser,
update_descriptor=update_descriptor,
)
company.refresh_from_db()
uk_country = Country.objects.get(iso_alpha2_code='GB')
assert model_to_dict_company(company) == {
'address_1': 'Unit 10, Ockham Drive',
'address_2': '',
'address_country': uk_country.id,
'address_county': '',
'address_postcode': 'UB6 0F2',
'address_town': 'GREENFORD',
'archived': False,
'archived_by': None,
'archived_documents_url_path': original_company.archived_documents_url_path,
'archived_on': None,
'archived_reason': None,
'business_type': original_company.business_type.id,
'company_number': '01261539',
'created_by': original_company.created_by.id,
'description': None,
'dnb_investigation_data': None,
'duns_number': '123456789',
'employee_range': original_company.employee_range.id,
'export_experience_category': original_company.export_experience_category.id,
'export_potential': None,
'export_to_countries': [],
'future_interest_countries': [],
'global_headquarters': None,
'global_ultimate_duns_number': '291332174',
'great_profile_status': None,
'headquarter_type': None,
'id': original_company.id,
'is_number_of_employees_estimated': True,
'is_turnover_estimated': None,
'modified_by': adviser.id if adviser else original_company.modified_by.id,
'name': 'FOO BICYCLE LIMITED',
'number_of_employees': 260,
'one_list_account_owner': None,
'one_list_tier': None,
'pending_dnb_investigation': False,
'reference_code': '',
'sector': original_company.sector.id,
'trading_names': [],
'transfer_reason': '',
'transferred_by': None,
'transferred_on': None,
'transferred_to': None,
'turnover': 50651895,
'turnover_range': original_company.turnover_range.id,
'uk_region': original_company.uk_region.id,
'vat_number': '',
'dnb_modified_on': now(),
}
versions = list(Version.objects.get_for_object(company))
assert len(versions) == 1
version = versions[0]
if update_descriptor:
assert version.revision.comment == f'Updated from D&B [{update_descriptor}]'
else:
assert version.revision.comment == 'Updated from D&B'
assert version.revision.user == adviser
if not adviser:
assert company.modified_on == original_company.modified_on
@pytest.mark.parametrize(
'adviser_callable',
(
lambda: None,
lambda: AdviserFactory(),
),
)
def test_update_company_from_dnb_partial_fields_single(
self,
formatted_dnb_company,
adviser_callable,
):
"""
Test that update_company_from_dnb can update a subset of fields.
"""
duns_number = '123456789'
company = CompanyFactory(duns_number=duns_number)
original_company = Company.objects.get(id=company.id)
adviser = adviser_callable()
update_company_from_dnb(
company,
formatted_dnb_company,
adviser,
fields_to_update=['global_ultimate_duns_number'],
)
company.refresh_from_db()
dnb_ultimate_duns = formatted_dnb_company['global_ultimate_duns_number']
assert company.global_ultimate_duns_number == dnb_ultimate_duns
assert company.name == original_company.name
assert company.number_of_employees == original_company.number_of_employees
@pytest.mark.parametrize(
'adviser_callable',
(
lambda: None,
lambda: AdviserFactory(),
),
)
def test_update_company_from_dnb_partial_fields_multiple(
self,
formatted_dnb_company,
adviser_callable,
):
"""
Test that update_company_from_dnb can update a subset of fields.
"""
duns_number = '123456789'
company = CompanyFactory(duns_number=duns_number)
original_company = Company.objects.get(id=company.id)
adviser = adviser_callable()
update_company_from_dnb(
company,
formatted_dnb_company,
adviser,
fields_to_update=['name', 'address'],
)
company.refresh_from_db()
assert company.global_ultimate_duns_number == original_company.global_ultimate_duns_number
assert company.number_of_employees == original_company.number_of_employees
assert company.name == formatted_dnb_company['name']
assert company.address_1 == formatted_dnb_company['address']['line_1']
assert company.address_2 == formatted_dnb_company['address']['line_2']
assert company.address_town == formatted_dnb_company['address']['town']
assert company.address_county == formatted_dnb_company['address']['county']
assert company.address_postcode == formatted_dnb_company['address']['postcode']
def test_post_dnb_data_invalid(
self,
formatted_dnb_company,
):
"""
Tests that ValidationError is raised when data returned by DNB is not valid for saving to a
Data Hub Company.
"""
company = CompanyFactory(duns_number='123456789')
adviser = AdviserFactory()
formatted_dnb_company['name'] = None
with pytest.raises(serializers.ValidationError) as excinfo:
update_company_from_dnb(company, formatted_dnb_company, adviser)
assert str(excinfo) == 'Data from D&B did not pass the Data Hub validation checks.'
class TestGetCompanyUpdatePage:
"""
Test for the `get_company_update_page` utility function.
"""
@pytest.mark.parametrize(
'last_updated_after', (
'2019-11-11T12:00:00',
'2019-11-11',
),
)
@pytest.mark.parametrize(
'next_page', (
None,
'http://some.url/endpoint?cursor=some-cursor',
),
)
def test_valid(self, requests_mock, last_updated_after, next_page):
"""
Test if `get_company_update_page` returns the right response
on the happy-path.
"""
expected_response = {
'previous': None,
'next': f'{DNB_UPDATES_URL}?cursor=next-cursor',
'results': [
{'key': 'value'},
],
}
mocker = requests_mock.get(
next_page if next_page else DNB_UPDATES_URL,
status_code=status.HTTP_200_OK,
json=expected_response,
)
response = get_company_update_page(last_updated_after, next_page)
if next_page:
assert mocker.last_request.url == next_page
else:
assert mocker.last_request.qs.get('last_updated_after') == [last_updated_after]
assert response == expected_response
@pytest.mark.parametrize(
'dnb_response_status',
(
status.HTTP_400_BAD_REQUEST,
status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN,
status.HTTP_404_NOT_FOUND,
status.HTTP_405_METHOD_NOT_ALLOWED,
status.HTTP_500_INTERNAL_SERVER_ERROR,
),
)
def test_dnb_service_error(
self,
caplog,
requests_mock,
dnb_response_status,
):
"""
        Test that if the dnb-service returns a status code that is not
        200, we log it and raise an exception with an appropriate
        message.
"""
requests_mock.get(
DNB_UPDATES_URL,
status_code=dnb_response_status,
)
with pytest.raises(DNBServiceError) as e:
get_company_update_page(last_updated_after='foo')
expected_message = f'DNB service returned an error status: {dnb_response_status}'
assert e.value.args[0] == expected_message
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == expected_message
@pytest.mark.parametrize(
'request_exception, expected_exception, expected_message',
(
(
ConnectionError,
DNBServiceConnectionError,
'Encountered an error connecting to DNB service',
),
(
ConnectTimeout,
DNBServiceConnectionError,
'Encountered an error connecting to DNB service',
),
(
Timeout,
DNBServiceTimeoutError,
'Encountered a timeout interacting with DNB service',
),
(
ReadTimeout,
DNBServiceTimeoutError,
'Encountered a timeout interacting with DNB service',
),
),
)
def test_get_company_dnb_service_request_error(
self,
caplog,
requests_mock,
request_exception,
expected_exception,
expected_message,
):
"""
        Test that if there is an error connecting to dnb-service, we log it and raise
        an exception with an appropriate message.
"""
requests_mock.get(
DNB_UPDATES_URL,
exc=request_exception,
)
with pytest.raises(expected_exception) as excinfo:
get_company_update_page(last_updated_after='foo')
assert str(excinfo.value) == expected_message
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == expected_message
class TestRollbackDNBCompanyUpdate:
"""
Test rollback_dnb_company_update utility function.
"""
@pytest.mark.parametrize(
'fields, expected_fields',
(
(None, ALL_DNB_UPDATED_MODEL_FIELDS),
(['name'], ['name']),
),
)
def test_rollback(
self,
formatted_dnb_company,
fields,
expected_fields,
):
"""
Test that rollback_dnb_company_update will roll back all DNB fields.
"""
with reversion.create_revision():
company = CompanyFactory(duns_number=formatted_dnb_company['duns_number'])
original_company = Company.objects.get(id=company.id)
update_company_from_dnb(
company,
formatted_dnb_company,
update_descriptor='foo',
)
rollback_dnb_company_update(company, 'foo', fields_to_update=fields)
company.refresh_from_db()
for field in expected_fields:
assert getattr(company, field) == getattr(original_company, field)
latest_version = Version.objects.get_for_object(company)[0]
assert latest_version.revision.comment == 'Reverted D&B update from: foo'
@pytest.mark.parametrize(
'update_comment, error_message',
(
('foo', 'Revision with comment: foo is the base version.'),
('bar', 'Revision with comment: bar not found.'),
),
)
def test_rollback_error(
self,
formatted_dnb_company,
update_comment,
error_message,
):
"""
Test that rollback_dnb_company_update will fail with the given error
message when there is an issue in finding the version to revert to.
"""
company = CompanyFactory(duns_number=formatted_dnb_company['duns_number'])
update_company_from_dnb(
company,
formatted_dnb_company,
update_descriptor='foo',
)
with pytest.raises(RevisionNotFoundError) as excinfo:
rollback_dnb_company_update(company, update_comment)
assert str(excinfo.value) == error_message
class TestFormatDNBCompany:
"""
Tests for format_dnb_company function.
"""
def test_turnover_usd(self, dnb_response_uk):
"""
Test that the function returns `turnover`
and `is_turnover_estimated` when `annual_sales`
are in USD.
"""
dnb_company = dnb_response_uk['results'][0]
company = format_dnb_company(dnb_company)
assert company['turnover'] == dnb_company['annual_sales']
assert company['is_turnover_estimated'] == dnb_company['is_annual_sales_estimated']
def test_turnover_non_usd(self, dnb_response_uk):
"""
Test that the function does not return `turnover`
and `is_turnover_estimated` when `annual_sales`
are not in USD.
"""
dnb_company = dnb_response_uk['results'][0]
dnb_company['annual_sales_currency'] = 'GBP'
company = format_dnb_company(dnb_company)
assert company['turnover'] is None
assert company['is_turnover_estimated'] is None
|
nilq/baby-python
|
python
|
# Importing standard libraries
import sys
import copy
'''
Basic Cryptanalysis : The logic is pretty simple.
Step 1: Construct a set of candidate solutions for each word's decoded
message based on length. (The lengths of the encoded and decoded
messages are the same.)
Step 2: Try out each path recursively, breaking when an inconsistency in
the mapping is encountered. The getPath function is used for this.
Possible Optimization : Sort the word list from largest to smallest
before performing Steps 1 and Steps 2
'''
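# Added illustration (not part of the original script): how the mapping
# consistency check behaves on a tiny input, assuming the helpers below.
#   encoded word 'xyy' vs. candidate 'too' -> extendMapping records x->t, y->o
#   a later candidate that needs y->a then fails isValid() and is pruned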
'''
Reads a string array from the stream passed in as a parameter. Simple parse
function that can read from files as well as standard input.
'''
def parseStringArr(stream):
return [str(x) for x in stream.readline().rstrip().split()]
'''
Returns the dictionary list as a set of words by reading from the
dictionary.lst file
'''
def getDictList(s):
f = open(s)
curStr = f.readline().rstrip()
correctWords = set()
while(curStr):
correctWords.add(curStr)
curStr = f.readline().rstrip()
return correctWords
'''
Main function to compute the decoded message.
'''
def convert(inWordList,dictSet):
# Constructing the candidate set for the conversion
candidateSet = [set() for i in range(len(inWordList))]
for i in range(len(inWordList)):
for j in dictSet:
if(len(inWordList[i]) == len(j)):
candidateSet[i].add(j.lower())
index = 0
outWordList = []
mapping = {chr(i + ord('a')):'0' for i in range(26)}
outWordList = getPath(dictSet,inWordList,mapping,candidateSet,index)
return outWordList
'''
Recursive function that computes the decoded message
'''
def getPath(dictSet,inWordList,mapping,candidateSet,index):
    if(index >= len(candidateSet)): return []
    if(len(candidateSet[index]) == 0): return []
else:
candidateSoln = candidateSet[index]
curWord = inWordList[index]
maxPath = []
for soln in candidateSoln:
if(isValid(mapping,curWord,soln)):
newMapping = extendMapping(mapping,curWord,soln)
path = getPath(dictSet,inWordList,newMapping,candidateSet,index + 1)
path = [soln] + path
if(len(path) > len(maxPath)):
maxPath = path
if(len(maxPath) == len(inWordList) - index):
return maxPath
else:
return []
'''
Function that checks if a particular candidate solution to a particular
word is a valid mapping and is consistent with the previous mapping.
'''
def isValid(mapping,curWord,soln):
for i in range(len(curWord)):
if(mapping[curWord[i]] != soln[i]):
if(mapping[curWord[i]] != '0'):
return False
return True
'''
Extends the current mapping with the mapping from the current solution
under consideration.
'''
def extendMapping(mapping,curWord,soln):
newMapping = copy.deepcopy(mapping)
for i in range(len(curWord)):
newMapping[curWord[i]] = soln[i]
return newMapping
'''
Main function to run the program
'''
if __name__ == "__main__":
stream = sys.stdin
dictSet = getDictList('dictionary.lst')
inWordList = parseStringArr(stream)
outWordList = convert(inWordList,dictSet)
    print(' '.join(outWordList))
''' END '''
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from celery import task
from celery import Celery
from celery import app
import pymongo
import json
from bson import json_util,ObjectId
from pymongo import MongoClient
# from pymongo import find_many
from bson.dbref import DBRef
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from pymongo.read_preferences import ReadPreference
from operator import itemgetter
from random import randint
import bisect
import collections
# from pymongo.objectid import ObjectId
#client = MongoClient()
client = MongoReplicaSetClient(
'localhost:27017,localhost:27018,localhost:27019',
replicaSet='socsDBset')
client.readPreference = 'primaryPreferred'
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
# {u'course_id': u'cse220', u'blocks': {u'start_period': u'1', u'days': [u'M', u'W'], u'end_period': u'2'}, u'instructor': u'wong', u'course_name': u'systems', u'block_key_value': u'13'}
class DayItemSort(object):
def __init__(self, course_id,blocks,instructor,course_name,block_key_value):
self.course_id = course_id
self.blocks = blocks
self.instructor = instructor
self.course_name= course_name
self.block_key_value = block_key_value
    def __repr__(self):
        return '{}: {} {} {} {} {}'.format(self.__class__.__name__,
                                           self.course_id,
                                           self.blocks,
                                           self.instructor,
                                           self.course_name,
                                           self.block_key_value)
def __cmp__(self, other):
if hasattr(other, 'getKey'):
return self.getKey().__cmp__(other.getKey())
def getKey(self):
return self.block_key_value
def __getitem__(self, key):
return self.block_key_value[key]
@task(bind=True, queue='read_tasks')
def find_school_two(self, data):
db = client.students
school_collection = db.school_list
student_collection = db.students
student =student_collection.find_one({'email':data['email']})
student_school = student['school']
student_school_address = student['address']
print("PPOOOOOOOOOOOOOOOOOOOOOODLE")
print(student_school)
print(student_school_address)
target = school_collection.find_one( { '$and': [ { 'name': student_school }, { 'address': student_school_address } ] })
del target['_id']
return json_util.dumps(target)
@task(bind=True, queue='read_tasks')
def get_overlapping_friends_by_specific_course_two(self, data):
db = client.students
assigned_schedule = db.assigned_schedule
email = data['email']
target = data['target']
# name = data['course_name']
# start_period = data['start_period']
# end_period = data['end_period']
# course_id = data['course_id']
# instructor = data['instructor']
# print(email)
assigned_schedule_return =assigned_schedule.find_one({'email':email})
assigned_schedule_friends =assigned_schedule.find_one({'email':target})
# "classes" : [
# {
# "course_name" : "wongs time",
# "start_period" : "1",
# "days" : [
# "tu"
# ],
# "end_period" : "2",
# "course_id" : "cse220",
# "instructor" : "wong"
# },
return_list={}
course_list=[]
class_array = assigned_schedule_return['classes']
friends_class_array = assigned_schedule_friends['classes']
return_list['friend']=target
for classes in class_array:
for fclasses in friends_class_array:
if fclasses['course_name']==classes['course_name'] and fclasses['instructor']== classes['instructor'] and fclasses['course_id']==classes['course_id']:
course_list.append(fclasses['course_id'])
return_list['courses']=course_list
return return_list
@task(bind=True, queue='write_tasks')
def add_students_to_database_two(self, data):
db = client.students
students_temp = db.students
friends_list = db.friends_list
first_name_var = data['first_name']
last_name_var = data['last_name']
email_stuff = data['email']
school_name = data['school']
school_address = data['address']
friend_info_dict = {
'first_name': first_name_var,
'last_name': last_name_var,
'list': []}
id_1 = friends_list.insert_one(friend_info_dict)
student_dict = {
'first_name': first_name_var,
'last_name': last_name_var,
'email': email_stuff,
'school': school_name,
'address':school_address,
'friendslist': DBRef(
'friends_list',
friend_info_dict["_id"])}
print (student_dict)
id_2 = students_temp.insert_one(student_dict)
return str(student_dict)
@task(bind=True, queue='write_tasks')
def remove_school(self, data):
db = client.students
school_collection = db.school_list
name = data['school_name']
address = data['school_address']
target = school_collection.find_one_and_delete( { '$and': [ { 'name': name }, { 'address': address } ] })
#school_collection.remove(target.id)
return str(target)
@task(bind = True, queue = 'read_tasks')
def create_desired_schedule(self, email_address, data):
# data = [ {
# 'course_id' : "cse220",
# 'blocks' : {
# 'start_period' : "1",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "2"
# },
# 'instructor' : "wong",
# 'course_name' : "systems",
# 'preferred': False
# },
# {
# 'course_id' : "cse114",
# 'blocks' : {
# 'start_period' : "5",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "6"
# },
# 'instructor' : "skiena",
# 'course_name' : "intro",
# 'preferred': True
# },
# {
# 'course_id' : "cse110",
# 'blocks' : {
# 'start_period' : "5",
# 'days' : [
# "M",
# "W"
# ],
# 'end_period' : "6"
# },
# 'instructor' : "bach",
# 'course_name' : "android",
# 'preferred': False
# }
# ]
# data.append()
db = client.students
student_collection = db.students
assigned_schedule = db.assigned_schedule
    email = email_address
    who_i_am = student_collection.find_one({'email': email})
    list_of_stuff = []
    if who_i_am is not None:
        friends_loc = str(who_i_am['friendslist'])
        friends_loc = friends_loc.split(",", 1)
        friends_loc = friends_loc[1]
        friends_loc = friends_loc.split("'", 2)
        friends_loc = friends_loc[1]
        friends_doc = db.friends_list.find_one({'_id': ObjectId(friends_loc)})
        if friends_doc is not None:
            list_of_stuff = friends_doc['list']
day_map= {'M':"1",'Tu':"2",'W':"3",'Th':"4",'F':"5",'S':"6",'Su':"7"}
# num_friends_in_classes_hash = {}
friends_overlap = []
course_hash_map={}
current_blocks =[]
sort_day_value = ""
for courses_in_data in data:
# course_hash_map[courses_in_data['course_name']] = 0
courses_in_data['count'] = 0
    for fr in list_of_stuff:
        assigned_schedule_friends = assigned_schedule.find_one({'email': fr['email']})
        friends_class_array = []
        if assigned_schedule_friends is not None:
            friends_class_array = assigned_schedule_friends['classes']
        for classes in data:
            for fclasses in friends_class_array:
                if fclasses['course_name'] == classes['course_name'] and fclasses['instructor'] == classes['instructor'] and fclasses['course_id'] == classes['course_id']:
                    classes['count'] = classes['count'] + 1
for classes in data:
current_blocks = classes['blocks']
for day in current_blocks['days']:
sort_day_value = sort_day_value + day_map[day]
classes['block_key_value'] = sort_day_value
classes['dif'] = int(current_blocks['end_period'])- int(current_blocks['start_period'])
sort_day_value = ""
for da in data:
da['weight'] = 0.01
if da['preferred']== True:
da['weight'] = da['weight']+.6
da['weight'] = (da['count'] *.1) + da['weight']
new_list = sorted(data, key=itemgetter('block_key_value', 'dif'))
start = []
finish = []
for datas in new_list:
this_block = datas['blocks']
start.append(this_block['start_period'])
finish.append(this_block['end_period'])
p = []
for j in range(len(new_list)):
i = bisect.bisect_right(finish, start[j]) - 1 # rightmost interval f_i <= s_j
p.append(i)
OPT = collections.defaultdict(int)
OPT[-1] = 0
OPT[0] = 0
for j in range(1, len(new_list)):
dats = new_list[j]
print(dats)
OPT[j] = max(dats['weight'] + OPT[p[j]], OPT[j - 1])
# given OPT and p, find actual solution intervals in O(n)
O = []
def compute_solution(j):
if j >= 0: # will halt on OPT[-1]
dats = new_list[j]
if dats['weight'] + OPT[p[j]] > OPT[j - 1]:
O.append(new_list[j])
compute_solution(p[j])
else:
compute_solution(j - 1)
compute_solution(len(new_list) - 1)
return O
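# (Added note) create_desired_schedule above is essentially weighted interval
# scheduling: classes are sorted by day pattern and duration, p[j] points at
# the rightmost earlier class whose finish does not pass class j's start, and
# OPT[j] = max(weight_j + OPT[p[j]], OPT[j - 1]) keeps the heavier of taking
# or skipping class j. compute_solution then walks OPT backwards to recover
# the chosen classes; weights favour preferred classes and friend overlap.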
@task(bind=True, queue='write_tasks')
def remove_a_class_from_assigned_two(self, data,days_array):
db = client.students
assigned_schedule = db.assigned_schedule
email = data['email']
name = data['course_name']
start_period = data['start_period']
end_period = data['end_period']
course_id = data['course_id']
instructor = data['instructor']
print(data)
print(days_array)
blocks = {}
blocks['start_period'] = start_period
blocks['end_period'] = end_period
blocks['days'] = days_array
print(" ")
print(blocks)
val =assigned_schedule.find_one_and_update( {'email': email, 'classes.course_name': name, 'classes.course_id':course_id,'classes.instructor':instructor},
{'$pull': { 'classes': { 'course_name': name, 'course_id':course_id,'instructor':instructor}}})
print(val)
return json_util.dumps(val)
@task(bind = True,queue='read_tasks')
def get_course_offerings_two(self,email,year):
db = client.students
student_collection = db.students
school_collection = db.school_list
course_offerings =db.semester_courses_ref
course_list = db.course_list
# print(email)
who_i_am =student_collection.find_one({'email':email})
school_i_go_to = who_i_am['school']
school_address = who_i_am['address']
# print(school_i_go_to)
my_school =school_collection.find_one({'$and': [{'address': school_address}, {'name': school_i_go_to}]})
# year is missing
output = []
for yr in my_school['year']:
if yr['year_name']== year:
all_semesters = yr['semesters']
for als in all_semesters:
semester_ref = als['semester_courses_ref']
semester_name = als['semester_name']
course_ref_list = course_offerings.find_one({'_id':ObjectId(semester_ref)})
courses_held = course_ref_list['courses_held']
for cor in courses_held:
# prepare to trim the stuff we dont need
setup_course = {}
id_of_this_course = str(cor['course_id'])
print(id_of_this_course)
found_course = course_list.find_one({'_id':ObjectId(id_of_this_course)})
print(found_course)
setup_course['course_id'] = found_course['course_id']
setup_course['instructor'] = found_course['instructor']
setup_course['course_name']= found_course['course_name']
setup_course['blocks'] = found_course['blocks']
setup_course['semester_name']=semester_name
output.append(setup_course)
return output
@task(bind = True,queue='read_tasks')
def get_course_offerings_by_semester_two(self,email,year,semester):
db = client.students
student_collection = db.students
school_collection = db.school_list
course_offerings =db.semester_courses_ref
course_list = db.course_list
# print(email)
who_i_am =student_collection.find_one({'email':email})
school_i_go_to = who_i_am['school']
school_address = who_i_am['address']
# print(school_i_go_to)
my_school =school_collection.find_one({'$and': [{'address': school_address}, {'name': school_i_go_to}]})
# year is missing
output = []
for yr in my_school['year']:
if yr['year_name']== year:
all_semesters = yr['semesters']
for als in all_semesters:
if als['semester_name'] == semester:
semester_ref = als['semester_courses_ref']
semester_name = als['semester_name']
course_ref_list = course_offerings.find_one({'_id':ObjectId(semester_ref)})
courses_held = course_ref_list['courses_held']
for cor in courses_held:
# prepare to trim the stuff we dont need
setup_course = {}
id_of_this_course = str(cor['course_id'])
print(id_of_this_course)
found_course = course_list.find_one({'_id':ObjectId(id_of_this_course)})
print(found_course)
setup_course['course_id'] = found_course['course_id']
setup_course['instructor'] = found_course['instructor']
setup_course['course_name']= found_course['course_name']
setup_course['semester_name']=semester_name
output.append(setup_course)
return output
@task(bind = True, queue='write_tasks')
def get_normal_schedule_two(self,data):
db = client.students
assigned = db.assigned_schedule
email = data['email']
# print(email)
val =assigned.find_one({'email':email})
# print(val)
if val is None:
return "null"
else:
return val['classes']
@task(bind=True, queue='read_tasks')
def add_classes_to_database_two(self, data):
db = client.students
students_collection = db.students
school_collection = db.school_list
course_list = db.course_list
course_offerings =db.semester_courses_ref
assigned = db.assigned_schedule
# {'username': 't1@t1.com',
# 'year': '2015', 'course_id': 'CSE 201',
# 'days': ['M', 'Tu', 'W'], 'course_name': 'Comp Sci',
# 'semester': 'Fall', 'new_year_flag': False,
# 'instructor': 'Poodle', 'start_period': '0', 'end_period': '3'}
username= data['username']
course_id=data['course_id']
course_name=data['course_name']
instructor=data['instructor']
# data['school'] = ''
blocks={}
blocks['days']=data['days']
blocks['start_period']= data['start_period']
blocks['end_period']= data['end_period']
# days=data['days'] #= ['','']
#start_period=data['start_period']
#end_period=data['end_period']
year=data['year']
semester=data['semester']
myself = students_collection.find_one({'email': username})
address_of_school = myself['address']
school_name = myself['school']
is_new_year=data['new_year_flag']
#the_school_info = school_collection.find_one({'name':school_name, 'address': address_of_school})
# info doesnt exist in the schools
# create info
# if newyear and not already in the database
    if(is_new_year):
        # create the year object and an empty course list for this semester
        new_course_list = []
        courses = []
        course_obj_ids = []
        semesters = []
        year_obj = {'year_name': year, 'num_periods_in_a_day': 0, 'blocks': [], 'semesters': []}
        new_course_list.append({'year': year, 'sem_name': semester, 'courses_held': courses})
        for semester_temp in new_course_list:
            course_obj_ids.append(course_offerings.insert_one(semester_temp).inserted_id)
        semesters.append({'semester_name': semester, 'semester_courses_ref': str(course_obj_ids[0])})
        year_obj['semesters'] = semesters
        school_collection.find_one_and_update({'name': school_name, 'address': address_of_school}, {'$addToSet': {'year': year_obj}})
else:
pass
temp_school = school_collection.find_one({'name':school_name, 'address': address_of_school})
year_sem = None
current_semester = None
# print(temp_school['year'])
for y in temp_school['year']:
if year == y['year_name']:
year_sem = y
break
# print("*******************")
# print(year_sem)
for s in year_sem['semesters']:
print("*******************")
print(semester +"=="+ s['semester_name'])
if semester.lower() == s['semester_name'].lower():
current_semester = s
ref_number = current_semester['semester_courses_ref']
# print(ref_number)
course_data = {'course_id':course_id,'course_name':course_name,'instructor':instructor,'blocks':blocks}
    # dereference(s['semester_courses_ref'])
# update({}, course_data, {upsert:true})
# id_of_course = course_list.insert_one(course_data).inserted_id
course_list.update(course_data, course_data, True)
id_of_inserted_course = course_list.find_one(course_data)
# print(id_of_inserted_course)
id_of_inserted_course = id_of_inserted_course['_id']
# print(id_of_inserted_course)
id_to_insert= {'course_id':ObjectId(id_of_inserted_course)}
course_offerings.update({'_id':ObjectId(ref_number)},{ '$addToSet': {'courses_held': id_to_insert} },True)
# add it the schedule now
# assigned
# insert_into_schedule
course_id=data['course_id']
course_name=data['course_name']
instructor=data['instructor']
# data['school'] = ''
days=data['days'] #= ['','']
#start_period=data['start_period']
#end_period=data['end_period']
##PUT BLOCK INFORMATION HERE
set_add = {'course_id':course_id, 'course_name': course_name, 'instructor': instructor,'blocks':blocks}
assigned.update({'email':username},{'$addToSet':{'classes':set_add}},True)
# .inserted_id
return
@task(bind=True, queue='write_tasks')
def send_a_friend_request_two(self,data):
db = client.students
email_of_requester = data['email_of_sender']
first_name_of_requester = data['first_name_emailer']
last_name_of_requester = data['last_name']
email_of_emailee = data['email_of_sendee']
first_name_of_emailee = data['first_name_emailee']
last_name_of_emailee = data['last_name_emailee']
friend_request_info = {"email_of_requester": email_of_requester,
"first_name_of_requester": first_name_of_requester,
"last_name_of_requester": last_name_of_requester,
"email_of_emailee": email_of_emailee,
"first_of_emailee": first_name_of_emailee,
'last_name_emailee':last_name_of_emailee}
db.friend_requests.insert_one(friend_request_info)
@task(bind=True, queue='read_tasks')
def get_friends_list_two(self,data):
db = client.students
# dat_base_var = "students"
# first_name_var = data['first_name']
# last_name_var = data['last_name']
email_stuff = data['email']
# original_id_2=db.students.insert(info2)
value = db.students.find_one({'email':email_stuff})
friends_loc = str(value['friendslist'])
friends_loc = friends_loc.split(",",1)
friends_loc = friends_loc[1]
friends_loc = friends_loc.split("'",2)
friends_loc = friends_loc[1]
# friends_loc = friends_loc.split("'",1)
# friends_loc = friends_loc[:-1]
# friends_loc = friends_loc[1:]
# print(friends_loc)
list_of_stuff= db.friends_list.find_one({'_id':ObjectId(friends_loc)})
# print(list_of_stuff)
list_of_stuff= list_of_stuff['list']
print(list_of_stuff)
# html = "<html><body> string: "+""+"</body></html>"
# print(list_of_stuff)
return list_of_stuff
@task(bind=True, queue='write_tasks')
def delete_a_student_from_database_two(self,email):
db = client.students
student_collection = db.students
db.students.find_one_and_delete({'email':email})
@task(bind=True, queue='write_tasks')
def delete_friend_from_friends_list_two(self,data):
db = client.students
# self
email_stuff = data['email']
first_name = data['first_name']
last_name =data['last_name']
f_email= data['friend_email']
value = db.students.find_one({'email':email_stuff})
# value_two = db.students.find_one({'email':f_stuff})
friends_loc = str(value['friendslist'])
friends_loc = friends_loc.split(",",1)
friends_loc = friends_loc[1]
friends_loc = friends_loc.split("'",2)
friends_loc = friends_loc[1]
first_name_two=value['first_name']
last_name_two=value['last_name']
friend_ob = db.students.find_one({'email':f_email})
friends_loc_two = str(friend_ob['friendslist'])
# strip the info we dont need
friends_loc_two = friends_loc_two.split(",",1)
friends_loc_two = friends_loc_two[1]
friends_loc_two = friends_loc_two.split("'",2)
friends_loc_two = friends_loc_two[1]
# first_name_two=friend_ob['first_name']
# last_name_two=friend_ob['last_name']
print(first_name_two)
print(last_name_two)
value_two = {'first_name':first_name,'last_name':last_name,'email':f_email}
print(value)
value = {'first_name':first_name_two,'last_name':last_name_two,'email':email_stuff}
print(value_two)
# {'$addToSet': {'year': year_obj}}
list_of_stuff= db.friends_list.find_one_and_update({'_id':ObjectId(friends_loc_two)},{ '$pull': {'list': value} })
list_of_stuff= db.friends_list.find_one_and_update({'_id':ObjectId(friends_loc)},{ '$pull': {'list': value_two} })
# return list_of_stuff
#dont use this yet
@task(bind=True, queue='read_tasks')
def get_schools_address_two(self, data):
    db = client.students
    school_collection = db.school_list
    name_of_school = data['school_name']
    address_of_school = data['address']
    schools = school_collection.find({'name': name_of_school, 'address': address_of_school})
array_of_schools=[]
for cus in schools:
# my_values['name'] = cus['name']
# cus['_id']= JSONEncoder().encode(cus['_id'])
array_of_schools.append(cus)
# return_bundle = {'result': array_of_schools}
return json_util.dumps(array_of_schools)
#unfinished
@task(bind=True, queue='write_tasks')
def delete_school_from_database_two(self, data):
    # not done yet; `student_dict` was never defined here, so just return None
    db = client.students
    school_collection = db.school_list
    return None
@task(bind=True, queue='read_tasks')
def search_all_students_two(self):
db = client.students
student_collection = db.students
students = student_collection.find({})
array_of_students=[]
for stud in students:
array_of_students.append(stud)
return json_util.dumps(array_of_students)
@task(bind=True, queue='read_tasks')
def search_school_from_database_two(self, data=None):
db = client.students
school_collection = db.school_list
schools = None
if data:
name_of_school = data['school_name']
schools = school_collection.find({'name':name_of_school})
else:
schools = school_collection.find()
array_of_schools=[]
for cus in schools:
# my_values['name'] = cus['name']
# cus['_id']= JSONEncoder().encode(cus['_id'])
array_of_schools.append(cus)
# return_bundle = {'result': array_of_schools}
return json_util.dumps(array_of_schools)
# return array_of_schools
@task(bind=True, queue='write_tasks')
def edit_school_to_database_two(self, data,address_of_edit):
db = client.students
school_collection = db.school_list
semester_courses_ref = db.semester_courses_ref
#data= {'name':name_of_school, 'num_days':days_in_a_year, 'num_sem':number_of_sem, 'address':address, 'num_days_in_schedule':num_days_in_a_schedule, 'year_obj':year}
name_of_school = data['name']
days_in_a_year = data['num_days']
address = data['address']
semesters_in_year= data['num_sem']
num_days_in_a_schedule=data['num_days_in_schedule']
name_of_semesters=data['semester_names']
year = data['year_obj']
year_container = []
semester = []
courses = []
course_list =[]
course_obj_ids=[]
course_name_id_tuple=[]
for current_sem_name in name_of_semesters:
course_list.append({'year':year['year_name'], 'sem_name':current_sem_name, 'courses_held':courses})
for semester_temp in course_list:
course_obj_ids.append(semester_courses_ref.insert_one(semester_temp).inserted_id)
for index ,g in enumerate(name_of_semesters):
semester.append({'semester_name': g,'semester_courses_ref': str(course_obj_ids[index])})
#some_val = db.dereference(semester[index])
year['semesters'] = semester
year_container.append(year)
data_input = {'name':name_of_school, 'days_in_a_year': days_in_a_year,
'address':address, 'semesters_in_year':semesters_in_year,
'num_days_in_a_schedule':num_days_in_a_schedule,'name_of_semesters':name_of_semesters,
'year':year_container
}
school_collection.find_one_and_replace({'address':address_of_edit},data_input)
return
@task(bind=True, queue='write_tasks')
def add_school_to_database_two(self, data):
db = client.students
school_collection = db.school_list
semester_courses_ref = db.semester_courses_ref
#data= {'name':name_of_school, 'num_days':days_in_a_year, 'num_sem':number_of_sem, 'address':address, 'num_days_in_schedule':num_days_in_a_schedule, 'year_obj':year}
name_of_school = data['name']
days_in_a_year = data['num_days']
address = data['address']
semesters_in_year= data['num_sem']
num_days_in_a_schedule=data['num_days_in_schedule']
name_of_semesters=data['semester_names']
year = data['year_obj']
year_container = []
semester = []
courses = []
course_list =[]
course_obj_ids=[]
course_name_id_tuple=[]
for current_sem_name in name_of_semesters:
course_list.append({'year':year['year_name'], 'sem_name':current_sem_name, 'courses_held':courses})
for semester_temp in course_list:
course_obj_ids.append(semester_courses_ref.insert_one(semester_temp).inserted_id)
for index ,g in enumerate(name_of_semesters):
semester.append({'semester_name': g,'semester_courses_ref': str(course_obj_ids[index])})
#some_val = db.dereference(semester[index])
year['semesters'] = semester
year_container.append(year)
data_input = {'name':name_of_school, 'days_in_a_year': days_in_a_year,
'address':address, 'semesters_in_year':semesters_in_year,
'num_days_in_a_schedule':num_days_in_a_schedule,'name_of_semesters':name_of_semesters,
'year':year_container
}
id_1 = school_collection.insert_one(data_input)
return
@task(bind = True, queue='write_tasks')
def copy_and_modify_school_two(self, data):
pass
@task(bind = True, queue='write_tasks')
def accept_friend_request_two(self, data):
db = client.students
friend_requests = db.friend_requests
student_collection = db.students
friends_collection = db.friends_list
emailee = data['email_of_sendee']
emailer = data['email_of_requester']
value=friend_requests.find_one_and_delete({'email_of_emailee':emailee, 'email_of_requester':emailer})
sendee_first_name=value['first_of_emailee']
sendee_last_name=value['last_name_emailee']
sender_first_name=value['first_name_of_requester']
sender_last_name=value['last_name_of_requester']
sender_info = student_collection.find_one({'email':emailer})
friends_loc = str(sender_info['friendslist'])
# strip the info we dont need
friends_loc = friends_loc.split(",",1)
friends_loc = friends_loc[1]
friends_loc = friends_loc.split("'",2)
friends_loc = friends_loc[1]
sendee_info = student_collection.find_one({'email':emailee})
friends_loc_two = str(sendee_info['friendslist'])
# strip the info we dont need
friends_loc_two = friends_loc_two.split(",",1)
friends_loc_two = friends_loc_two[1]
friends_loc_two = friends_loc_two.split("'",2)
friends_loc_two = friends_loc_two[1]
send_to_sender_friends= {'first_name': sendee_first_name, 'last_name':sendee_last_name, 'email':emailee}
send_to_sendee_friends= {'first_name': sender_first_name, 'last_name':sender_last_name, 'email':emailer}
# sender
friends_collection.find_one_and_update({'_id':ObjectId(friends_loc)},{ '$addToSet': { 'list': send_to_sender_friends} })
# sendee
friends_collection.find_one_and_update({'_id':ObjectId(friends_loc_two)},{ '$addToSet': { 'list': send_to_sendee_friends} })
# db.friends_list.find_one({'_id':ObjectId(friends_loc)})
@task(bind = True, queue='write_tasks')
def deny_friend_request_two(self, data):
db = client.students
friend_requests = db.friend_requests
emailee = data['email_of_sendee']
emailer = data['email_of_requester']
print(emailee)
print(emailer)
friend_requests.find_one_and_delete({'email_of_emailee':emailee, 'email_of_requester':emailer})
@task(bind = True, queue='read_tasks')
def get_friend_request_two(self, data):
db = client.students
email = data['email_of_sendee']
first_name= data['first_name_emailee']
last_name = data['last_name_emailee']
# "email_of_emailee" : "cheap@gmail.com",
# "last_name_emailee" : "will",
# "first_of_emailee" : "cheap",
friend_requests = db.friend_requests
result = friend_requests.find({'email_of_emailee':email})
# print(result['email_of_requester'])
allRequests= []
for req in result:
# print(result)
allRequests.append(req)
# print("returned")
return json_util.dumps(allRequests)
@task(bind=True, queue='read_tasks')
def possible_friends(self, username, first_name):
# """ render the create school view. """
# Display the create school view if the admin is logged in
db = client.students
students_temp = db.students
friend_requests = db.friend_requests
friends_list = db.friends_list
# Display all possible people we can add by searching a name
#username = name
# Search this person
#first_name = first_name
#last_name = last_name
# find out who i am
print(username)
myself = students_temp.find_one({'email': username})
print(myself)
# cool i go to this cool
school_i_go_to = myself['school']
# lets get all the people with this name and go to the same school as i do
people = []
# students_list =
for person in students_temp.find({'$and': [{'first_name': first_name}, {'school': school_i_go_to}]}):
#people_dict = {'first_name': first_name_var,'last_name': last_name_var, 'email': email_stuff, 'school': school_name, 'friendslist': DBRef('friends_list', friend_info_dict[ "_id"])}
# person['friendslist'] = json.dumps(str(person['friendslist']))
# person['_id'] = str(person['_id'])
del person['friendslist']
del person['_id']
del person['school']
print(person)
people.append(person)
# go to this place
# print people
all_my_friends_complete = friends_list.find_one(myself['friendslist'])
#all_my_friends_complete = DBRef('friends_list', friend_info_dict["_id"])
# get the list itself
all_my_friends = None
if all_my_friends_complete:
all_my_friends = all_my_friends_complete['list']
    # get all the requests associated with this person, as sender or receiver
all_my_requests = []
for req in friend_requests.find({'$or': [{'email_of_requester': username}, {'email_of_emailee': username}]}):
all_my_requests.append(req)
if (not all_my_friends):
        # no friends yet, so there is nothing to remove from `people` on that
        # account; only people tied to a pending request are filtered below
if (not all_my_requests or len(all_my_requests) == 0):
            # no pending requests either, so everyone stays in the list
            x = ""
else:
x = ""
# we must people - all_my_requests
for pe in people:
# print str(pe) + "\n"
for rq in all_my_requests:
# print str(rq)+"\n"
if (pe['email'] == rq['email_of_requester'] or pe['email'] == rq['email_of_emailee']):
people.remove(pe)
# requests were made and need to be removed
else:
# you have friends do something about it
# remove all your friends
# print all_my_friends
# we must people - all_my_requests
for pe in people:
# print str(pe) + "\n"
for af in all_my_friends:
# print str(af)+"\n"
if (pe['email'] == af['email']):
people.remove(pe)
if (not all_my_requests or len(all_my_requests) == 0):
# we found no current requests
x = ""
else:
# we must people - all_my_requests
for pe in people:
# print str(pe) + "\n"
for rq in all_my_requests:
# print str(rq)+"\n"
if (pe['email'] == rq['email_of_requester'] or pe['email'] == rq['email_of_emailee']):
people.remove(pe)
# print people
# print "success"
# html = "<html><body> string: "+"success"+"</body></html>"
return_dict = {'success': 'success'}
print(people)
return people
@task(bind=True, queue='read_tasks')
def get_a_person_two(self, data):
email = data['email']
db = client.students
students_temp = db.students
value = students_temp.find_one({'email':email})
return json_util.dumps(value)
@task
def mul(x, y):
# html = "<html><body> string: "+""+"</body></html>"
# return x + y
return x * y
@task
def xsum(numbers):
return sum(numbers)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Compare artifacts between runs
"""
# pylint: disable=redefined-builtin, self-assigning-variable, broad-except
import csv
import glob
import logging
import sys
import os
import pandas as pd
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure
from runs.taurus import reader as taurus_reader
from runs.storage import LocalStorage, S3Storage
from utils import Timer, get_sub_dirs
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
class CompareReportGenerator():
def __init__(self, path, env_name, local_run):
self.artifacts_dir = path
self.current_run_name = os.path.basename(path)
self.env_name = env_name
storage_class = LocalStorage if local_run else S3Storage
self.storage = storage_class(self.artifacts_dir, self.env_name)
self.junit_reporter = None
self.pandas_result = None
self.pass_fail = True
def gen(self):
"""Driver method to get comparison directory, do the comparison of it with current run directory
and then store results
"""
compare_dir, compare_run_name = self.storage.get_dir_to_compare()
if compare_run_name:
self.junit_reporter, self.pandas_result = compare_artifacts(self.storage.artifacts_dir, compare_dir,
self.storage.current_run_name, compare_run_name)
self.pandas_result.to_csv(os.path.join(self.artifacts_dir, "comparison_result.csv"))
else:
logger.warning("The latest run not found for env.")
self.storage.store_results()
return self.junit_reporter
class CompareTestSuite():
"""
Wrapper helper class over JUnit parser Test Suite
"""
result_types = {"pass": [lambda x: None, "tests"],
"fail": [Failure, "failures"],
"error": [Error, "errors"],
"skip": [Skipped, "skipped"]}
def __init__(self, name, hostname, t):
self.ts = TestSuite(name)
self.ts.errors, self.ts.failures, self.ts.skipped, self.ts.tests = 0, 0, 0, 0
self.ts.hostname = hostname
self.ts.timestamp = t.start
def add_test_case(self, name, msg, type):
tc = TestCase(name)
result_type = CompareTestSuite.result_types[type]
tc.result = result_type[0](msg)
self.ts.add_testcase(tc)
setattr(self.ts, result_type[1], getattr(self.ts, result_type[1]) + 1)
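    # e.g. add_test_case("mean_latency: diff_percent < 10", "passed", "pass")
    # appends a TestCase with result None and bumps the suite's `tests`
    # counter, while type="fail" wraps msg in a Failure and bumps `failures`.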
def get_log_file(dir, sub_dir):
"""Get metric monitoring log files"""
metrics_file = os.path.join(dir, sub_dir, "metrics.csv")
return metrics_file if os.path.exists(metrics_file) else None
def get_aggregate_val(df, agg_func, col):
"""Get aggregate values of a pandas datframe coulmn for given aggregate function"""
val = None
if str(col) in df:
try:
val = float(getattr(df[str(col)], agg_func)())
except TypeError:
val = None
return val
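# Example (hypothetical frame): get_aggregate_val(df, 'mean', 'latency')
# returns float(df['latency'].mean()), or None when the column is absent or
# the aggregate raises TypeError on non-numeric data.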
def compare_values(val1, val2, diff_percent, run_name1, run_name2):
""" Compare percentage diff values of val1 and val2 """
if pd.isna(val1) or pd.isna(val2):
msg = "Either of the value can not be determined. The run1 value is '{}' and " \
"run2 value is {}.".format(val1, val2)
pass_fail, diff, msg = "error", "NA", msg
else:
try:
if val2 != val1:
diff = (abs(val2 - val1) / ((val2 + val1) / 2)) * 100
if diff < float(diff_percent):
pass_fail, diff, msg = "pass", diff, "passed"
else:
msg = "The diff_percent criteria has failed. The expected diff_percent is '{}' and actual " \
"diff percent is '{}' and the '{}' run value is '{}' and '{}' run value is '{}'. ". \
format(diff_percent, diff, run_name1, val1, run_name2, val2)
pass_fail, diff, msg = "fail", diff, msg
            else:  # values are equal, so the diff is zero
                pass_fail, diff, msg = "pass", 0, ""
except Exception as e:
msg = "error while calculating the diff for val1={} and val2={}." \
"Error is: {}".format(val1, val2, str(e))
logger.info(msg)
pass_fail, diff, msg = "pass", "NA", msg
return diff, pass_fail, msg
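# Worked example (illustrative numbers only): val1=100, val2=110 gives
# diff = |110 - 100| / ((110 + 100) / 2) * 100 ~= 9.52, a symmetric percent
# difference; with diff_percent=10 this passes, with diff_percent=5 it fails.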
def compare_artifacts(dir1, dir2, run_name1, run_name2):
"""Compare artifacts from dir1 with di2 and store results in out_dir"""
logger.info("Comparing artifacts from %s with %s", dir1, dir2)
sub_dirs_1 = get_sub_dirs(dir1)
over_all_pass = True
aggregates = ["mean", "max", "min"]
header = ["run_name1", "run_name2", "test_suite", "metric", "run1", "run2",
"percentage_diff", "expected_diff", "result", "message"]
rows = [header]
reporter = JUnitXml()
for sub_dir1 in sub_dirs_1:
with Timer("Comparison test suite {} execution time".format(sub_dir1)) as t:
comp_ts = CompareTestSuite(sub_dir1, run_name1 + " and " + run_name1, t)
metrics_file1, metrics_file2 = get_log_file(dir1, sub_dir1), get_log_file(dir2, sub_dir1)
if not (metrics_file1 and metrics_file2):
msg = "Metrics monitoring logs are not captured for {} in either " \
"of the runs.".format(sub_dir1)
logger.info(msg)
rows.append([run_name1, run_name2, sub_dir1, "metrics_log_file_availability",
"NA", "NA", "NA", "NA", "pass", msg])
comp_ts.add_test_case("metrics_log_file_availability", msg, "skip")
continue
metrics_from_file1 = pd.read_csv(metrics_file1)
metrics_from_file2 = pd.read_csv(metrics_file2)
metrics, diff_percents = taurus_reader.get_compare_metric_list(dir1, sub_dir1)
for col, diff_percent in zip(metrics, diff_percents):
for agg_func in aggregates:
name = "{}_{}".format(agg_func, str(col))
val1 = get_aggregate_val(metrics_from_file1, agg_func, col)
val2 = get_aggregate_val(metrics_from_file2, agg_func, col)
diff, pass_fail, msg = compare_values(val1, val2, diff_percent, run_name1, run_name2)
if over_all_pass:
over_all_pass = pass_fail == "pass"
result_row = [run_name1, run_name2, sub_dir1, name, val1, val2,
diff, diff_percent, pass_fail, msg]
rows.append(result_row)
test_name = "{}: diff_percent < {}".format(name, diff_percent)
comp_ts.add_test_case(test_name, msg, pass_fail)
comp_ts.ts.time = t.diff()
comp_ts.ts.update_statistics()
reporter.add_testsuite(comp_ts.ts)
dataframe = pd.DataFrame(rows[1:], columns=rows[0])
return reporter, dataframe
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
from __future__ import division
import base64
import xml.etree.cElementTree as ET
from datetime import datetime
from io import IOBase
from logging import getLogger
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple, Union
from cryptography.hazmat.primitives import hashes, hmac
from .compat import quote
from .constants import (
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_VALUE_OCTET_STREAM,
FileHeader,
ResultStatus,
)
from .encryption_util import EncryptionMetadata
from .storage_client import SnowflakeStorageClient
from .vendored import requests
if TYPE_CHECKING: # pragma: no cover
from .file_transfer_agent import SnowflakeFileMeta, StorageCredential
logger = getLogger(__name__)
META_PREFIX = "x-amz-meta-"
SFC_DIGEST = "sfc-digest"
AMZ_MATDESC = "x-amz-matdesc"
AMZ_KEY = "x-amz-key"
AMZ_IV = "x-amz-iv"
ERRORNO_WSAECONNABORTED = 10053 # network connection was aborted
EXPIRED_TOKEN = "ExpiredToken"
ADDRESSING_STYLE = "virtual" # explicit force to use virtual addressing style
class S3Location(NamedTuple):
bucket_name: str
path: str
class SnowflakeS3RestClient(SnowflakeStorageClient):
def __init__(
self,
meta: "SnowflakeFileMeta",
credentials: "StorageCredential",
stage_info: Dict[str, Any],
chunk_size: int,
use_accelerate_endpoint: bool = False,
use_s3_regional_url=False,
):
"""Rest client for S3 storage.
Args:
stage_info:
use_accelerate_endpoint:
"""
super().__init__(meta, stage_info, chunk_size, credentials=credentials)
        # Legacy AWS2-style request signing ("AWS key:signature", HMAC-SHA1)
        # Addressing style: Virtual Host
self.region_name: str = stage_info["region"]
# Multipart upload only
self.upload_id: Optional[str] = None
self.etags: Optional[List[str]] = None
self.s3location: "S3Location" = (
SnowflakeS3RestClient._extract_bucket_name_and_path(
self.stage_info["location"]
)
)
self.use_s3_regional_url = use_s3_regional_url
# if GS sends us an endpoint, it's likely for FIPS. Use it.
if stage_info["endPoint"]:
self.endpoint = (
f"https://{self.s3location.bucket_name}." + stage_info["endPoint"]
)
elif use_accelerate_endpoint:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3-accelerate.amazonaws.com"
)
else:
if self.use_s3_regional_url:
self.endpoint = f"https://{self.s3location.bucket_name}.s3.{self.region_name}.amazonaws.com"
else:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3.amazonaws.com"
)
@staticmethod
def sign(secret_key, msg):
h = hmac.HMAC(secret_key, hashes.SHA1())
h.update(msg)
return base64.encodebytes(h.finalize()).strip()
@staticmethod
def _construct_canonicalized_element(
bucket_name: str = None,
request_uri: str = "",
subresource: Dict[str, Union[str, int, None]] = None,
) -> str:
if not subresource:
subresource = {}
res = ""
if bucket_name:
res += f"/{bucket_name}"
if request_uri:
res += "/" + request_uri
else:
            # no request URI (e.g. bucket- or service-level operations)
res += "/"
if subresource:
res += "?"
keys = sorted(subresource.keys())
res += (
keys[0]
if subresource[keys[0]] is None
else f"{keys[0]}={subresource[keys[0]]}"
)
for k in keys[1:]:
query_str = k if subresource[k] is None else f"{k}={subresource[k]}"
res += f"&{query_str}"
return res
@staticmethod
def construct_canonicalized_headers(
headers: Dict[str, Union[str, List[str]]]
) -> str:
_res = sorted([[k.lower(), v] for k, v in headers.items()])
res = []
for i in range(len(_res)):
k, v = _res[i]
# if value is a list, convert to string delimited by comma
if isinstance(v, list):
v = ",".join(v)
# if multiline header, replace withs space
k = k.replace("\n", " ")
res.append(k.rstrip() + ":" + v.lstrip())
ans = "\n".join(res)
if ans:
ans = ans + "\n"
return ans
@staticmethod
def _construct_string_to_sign(
verb: str,
canonicalized_element: str,
canonicalized_headers: str,
amzdate: str,
content_md5: str = "",
content_type: str = "",
) -> bytes:
res = verb + "\n" + content_md5 + "\n" + content_type + "\n"
res += amzdate + "\n" + canonicalized_headers + canonicalized_element
return res.encode("UTF-8")
@staticmethod
def _has_expired_token(response: requests.Response) -> bool:
"""Extract error code and error message from the S3's error response.
Expected format:
https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
Args:
response: Rest error response in XML format
Returns: True if the error response is caused by token expiration
"""
if response.status_code != 400:
return False
message = response.text
if not message or message.isspace():
return False
err = ET.fromstring(message)
return err.find("Code").text == EXPIRED_TOKEN
@staticmethod
def _extract_bucket_name_and_path(stage_location) -> "S3Location":
# split stage location as bucket name and path
bucket_name, _, path = stage_location.partition("/")
if path and not path.endswith("/"):
path += "/"
return S3Location(bucket_name=bucket_name, path=path)
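    # e.g. (hypothetical location) 'mybucket/stage/path' ->
    # S3Location(bucket_name='mybucket', path='stage/path/'); a trailing
    # slash is appended to any non-empty path.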
def _send_request_with_authentication_and_retry(
self,
url: str,
verb: str,
resources: str,
retry_id: Union[int, str],
x_amz_headers: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
content_type: str = "",
data: Union[bytes, bytearray, IOBase, None] = None,
) -> requests.Response:
if not x_amz_headers:
x_amz_headers = {}
if not headers:
headers = {}
def generate_authenticated_url_and_args() -> Tuple[bytes, Dict[str, bytes]]:
t = datetime.utcnow()
amzdate = t.strftime("%Y%m%dT%H%M%SZ")
if "AWS_TOKEN" in self.credentials.creds:
x_amz_headers["x-amz-security-token"] = self.credentials.creds.get(
"AWS_TOKEN"
)
_x_amz_headers = self.construct_canonicalized_headers(x_amz_headers)
string_to_sign = self._construct_string_to_sign(
verb, resources, _x_amz_headers, amzdate, content_type=content_type
)
signature = self.sign(
self.credentials.creds["AWS_SECRET_KEY"].encode("UTF-8"), string_to_sign
)
authorization_header = ( # TODO
"AWS " + self.credentials.creds["AWS_KEY_ID"] + ":" + signature.decode()
)
headers.update(x_amz_headers)
headers["Date"] = amzdate
headers["Authorization"] = authorization_header
rest_args = {"headers": headers}
if data:
rest_args["data"] = data
return url, rest_args
return self._send_request_with_retry(
verb, generate_authenticated_url_and_args, retry_id
)
def get_file_header(self, filename: str) -> Union[FileHeader, None]:
"""Gets the metadata of file in specified location.
Args:
filename: Name of remote file.
Returns:
None if HEAD returns 404, otherwise a FileHeader instance populated with metadata
"""
path = quote(self.s3location.path + filename.lstrip("/"))
url = self.endpoint + f"/{path}"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
retry_id = "HEAD"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url, "HEAD", _resource, retry_id
)
if response.status_code == 200:
self.meta.result_status = ResultStatus.UPLOADED
metadata = response.headers
encryption_metadata = (
EncryptionMetadata(
key=metadata.get(META_PREFIX + AMZ_KEY),
iv=metadata.get(META_PREFIX + AMZ_IV),
matdesc=metadata.get(META_PREFIX + AMZ_MATDESC),
)
if metadata.get(META_PREFIX + AMZ_KEY)
else None
)
return FileHeader(
digest=metadata.get(META_PREFIX + SFC_DIGEST),
content_length=int(metadata.get("Content-Length")),
encryption_metadata=encryption_metadata,
)
elif response.status_code == 404:
logger.debug(
f"not found. bucket: {self.s3location.bucket_name}, path: {path}"
)
self.meta.result_status = ResultStatus.NOT_FOUND_FILE
return None
else:
response.raise_for_status()
def _prepare_file_metadata(self) -> Dict[str, Any]:
"""Construct metadata for a file to be uploaded.
Returns: File metadata in a dict.
"""
s3_metadata = {
META_PREFIX + SFC_DIGEST: self.meta.sha256_digest,
}
if self.encryption_metadata:
s3_metadata.update(
{
META_PREFIX + AMZ_IV: self.encryption_metadata.iv,
META_PREFIX + AMZ_KEY: self.encryption_metadata.key,
META_PREFIX + AMZ_MATDESC: self.encryption_metadata.matdesc,
}
)
return s3_metadata
def _initiate_multipart_upload(self) -> None:
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploads"
s3_metadata = self._prepare_file_metadata()
# initiate multipart upload
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploads": None},
)
retry_id = "Initiate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url,
"POST",
_resource,
retry_id,
x_amz_headers=s3_metadata,
content_type=HTTP_HEADER_VALUE_OCTET_STREAM,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
)
if response.status_code == 200:
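            # the children of InitiateMultipartUploadResult are Bucket, Key,
            # UploadId, so index 2 picks out the UploadId element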
self.upload_id = ET.fromstring(response.content)[2].text
self.etags = [None] * self.num_of_chunks
else:
response.raise_for_status()
def _upload_chunk(self, chunk_id: int, chunk: bytes):
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
if self.num_of_chunks == 1: # single request
s3_metadata = self._prepare_file_metadata()
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
response = self._send_request_with_authentication_and_retry(
url,
"PUT",
_resource,
chunk_id,
data=chunk,
x_amz_headers=s3_metadata,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
content_type=HTTP_HEADER_VALUE_OCTET_STREAM,
)
response.raise_for_status()
else:
# multipart PUT
chunk_url = url + f"?partNumber={chunk_id+1}&uploadId={self.upload_id}"
query_params = {"partNumber": chunk_id + 1, "uploadId": self.upload_id}
chunk_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource=query_params,
)
response = self._send_request_with_authentication_and_retry(
chunk_url, "PUT", chunk_resource, chunk_id, data=chunk
)
if response.status_code == 200:
self.etags[chunk_id] = response.headers["ETag"]
response.raise_for_status()
def _complete_multipart_upload(self) -> None:
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploadId={self.upload_id}"
logger.debug("Initiating multipart upload complete")
# Complete multipart upload
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploadId": self.upload_id},
)
root = ET.Element("CompleteMultipartUpload")
for idx, etag_str in enumerate(self.etags):
part = ET.Element("Part")
etag = ET.Element("ETag")
etag.text = etag_str
part.append(etag)
part_number = ET.Element("PartNumber")
part_number.text = str(idx + 1)
part.append(part_number)
root.append(part)
retry_id = "Complete"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url,
"POST",
_resource,
retry_id,
data=ET.tostring(root),
)
response.raise_for_status()
def _abort_multipart_upload(self) -> None:
if self.upload_id is None:
return
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploadId={self.upload_id}"
retry_id = "Abort"
self.retry_count[retry_id] = 0
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploadId": self.upload_id},
)
response = self._send_request_with_authentication_and_retry(
url, "DELETE", _resource, retry_id
)
response.raise_for_status()
def download_chunk(self, chunk_id: int) -> None:
logger.debug(f"Downloading chunk {chunk_id}")
path = quote(self.s3location.path + self.meta.src_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
if self.num_of_chunks == 1:
response = self._send_request_with_authentication_and_retry(
url, "GET", _resource, chunk_id
)
if response.status_code == 200:
self.write_downloaded_chunk(0, response.content)
self.meta.result_status = ResultStatus.DOWNLOADED
response.raise_for_status()
else:
chunk_size = self.chunk_size
if chunk_id < self.num_of_chunks - 1:
_range = f"{chunk_id * chunk_size}-{(chunk_id+1)*chunk_size-1}"
else:
_range = f"{chunk_id * chunk_size}-"
response = self._send_request_with_authentication_and_retry(
url,
"GET",
_resource,
chunk_id,
headers={"Range": f"bytes={_range}"},
)
if response.status_code in (200, 206):
self.write_downloaded_chunk(chunk_id, response.content)
response.raise_for_status()
def transfer_accelerate_config(self) -> bool:
url = self.endpoint + "/?accelerate"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, subresource={"accelerate": None}
)
retry_id = "accelerate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url, "GET", _resource, retry_id
)
if response.status_code == 200:
config = ET.fromstring(response.text)
            status = config.find("Status")
            # compare against None explicitly: childless Elements are falsy
            use_accelerate_endpoint = (
                status is not None and status.text == "Enabled"
            )
logger.debug(f"use_accelerate_endpoint: {use_accelerate_endpoint}")
return use_accelerate_endpoint
return False
|
nilq/baby-python
|
python
|
from mailbox import MMDF
from django_mail_admin.transports.generic import GenericFileMailbox
class MMDFTransport(GenericFileMailbox):
_variant = MMDF
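# Minimal usage sketch (added; follows the generic django-mailbox transport
# pattern that django_mail_admin inherits; the path is hypothetical and the
# exact API should be checked against the installed version):
#
#   transport = MMDFTransport('/var/mail/inbox.mmdf')
#   for message in transport.get_message():
#       handle(message)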
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospkg
import rospy
import yaml
from duckietown_msgs.msg import AprilTagDetectionArray, Twist2DStamped
import numpy as np
import tf.transformations as tr
from geometry_msgs.msg import PoseStamped, Point
class AprilFollow(object):
def __init__(self):
self.node_name = "follow_apriltags_node"
self.pose = Point()
# -------- subscriber --------
self.sub_pose = rospy.Subscriber("~input", Point, self.callback, queue_size=1)
# -------- publisher --------
self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
print ("Start to follow apriltags:")
def callback(self, msg):
self.pose = msg
self.car_cmd()
self.stop()
    def car_cmd(self):
        cmd = Twist2DStamped()
        # deadband control: hold still while the tag sits 0.15~0.20 away
        if self.pose.z > 0.20:  # the tag is too far, drive forward
            cmd.v = 0.2
        elif self.pose.z < 0.15:  # the tag is too close, back up
            cmd.v = -0.2
        else:  # distance is within 0.15~0.20
            cmd.v = 0
        if self.pose.x > 0.02:  # the tag is to the right side
            cmd.omega = -1.8
        elif self.pose.x < -0.02:  # the tag is to the left side
            cmd.omega = 1.8
        else:  # centered, do not turn
            cmd.omega = 0
#publish the cmd
self.pub_car_cmd.publish(cmd)
# make the robot stop
def stop(self):
rospy.sleep(0.2)
cmd = Twist2DStamped()
cmd.v = 0
cmd.omega = 0
#publish the cmd
self.pub_car_cmd.publish(cmd)
if __name__ == '__main__':
rospy.init_node('AprilPostPros',anonymous=False)
node = AprilFollow()
rospy.spin()
|
nilq/baby-python
|
python
|
import argparse
import os
import logging
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
import re
import torch
import torch.nn.functional as F
from core.configs import cfg
from core.datasets import build_dataset
from core.models import build_feature_extractor, build_classifier
from core.utils.misc import mkdir, AverageMeter, intersectionAndUnionGPU, get_color_pallete
from core.utils.logger import setup_logger
def strip_prefix_if_present(state_dict, prefix):
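    # Checkpoints saved from (Distributed)DataParallel models prefix every key with
    # e.g. "module."; strip it so the weights load into an unwrapped model.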
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
return stripped_state_dict
def inference(feature_extractor, classifier, image, label, flip=True):
size = label.shape[-2:]
if flip:
image = torch.cat([image, torch.flip(image, [3])], 0)
with torch.no_grad():
output = classifier(feature_extractor(image))
output = F.interpolate(output, size=size, mode='bilinear', align_corners=True)
output = F.softmax(output, dim=1)
if flip:
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
return output.unsqueeze(dim=0)
def transform_color(pred):
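    # Map the 16 SYNTHIA train ids onto the corresponding Cityscapes train ids;
    # pixels with no mapping keep the ignore value 255.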
synthia_to_city = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 10,
10: 11,
11: 12,
12: 13,
13: 15,
14: 17,
15: 18,
}
label_copy = 255 * np.ones(pred.shape, dtype=np.float32)
for k, v in synthia_to_city.items():
label_copy[pred == k] = v
return label_copy.copy()
def test(cfg):
logger = logging.getLogger("ICCV2021.tester")
logger.info("Start testing")
device = torch.device(cfg.MODEL.DEVICE)
feature_extractor = build_feature_extractor(cfg)
feature_extractor.to(device)
classifier = build_classifier(cfg)
classifier.to(device)
if cfg.resume:
logger.info("Loading checkpoint from {}".format(cfg.resume))
checkpoint = torch.load(cfg.resume, map_location=torch.device('cpu'))
feature_extractor_weights = strip_prefix_if_present(checkpoint['feature_extractor'], 'module.')
feature_extractor.load_state_dict(feature_extractor_weights)
classifier_weights = strip_prefix_if_present(checkpoint['classifier'], 'module.')
classifier.load_state_dict(classifier_weights)
feature_extractor.eval()
classifier.eval()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
torch.cuda.empty_cache()
dataset_name = cfg.DATASETS.TEST
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
test_data = build_dataset(cfg, mode='test', is_source=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False, num_workers=4,
pin_memory=True, sampler=None)
for batch in tqdm(test_loader):
x, y, name = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).long()
pred = inference(feature_extractor, classifier, x, y, flip=False)
output = pred.max(1)[1]
intersection, union, target = intersectionAndUnionGPU(output, y, cfg.MODEL.NUM_CLASSES, cfg.INPUT.IGNORE_LABEL)
intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()
intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
# save prediction map
pred = pred.cpu().numpy().squeeze().argmax(0)
if 'synthia' in cfg.DATASETS.SOURCE_TRAIN:
pred = transform_color(pred)
mask = get_color_pallete(pred, "city")
mask_filename = name[0].split("/")[1]
mask.save(os.path.join(output_folder, mask_filename))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(cfg.MODEL.NUM_CLASSES):
logger.info(
'{} {} iou/accuracy: {:.4f}/{:.4f}.'.format(i, test_data.trainid2name[i], iou_class[i], accuracy_class[i]))
def test_all(cfg):
logger = logging.getLogger("ICCV2021.tester")
logger.info("Start testing")
device = torch.device(cfg.MODEL.DEVICE)
feature_extractor = build_feature_extractor(cfg)
feature_extractor.to(device)
classifier = build_classifier(cfg)
classifier.to(device)
test_data = build_dataset(cfg, mode='test', is_source=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False, num_workers=4,
pin_memory=True, sampler=None)
test_stats = []
best_iter = 0
best_miou = 0
best_checkpoint = None
for fname in sorted(os.listdir(cfg.resume)):
if not fname.endswith('.pth'):
continue
logger.info("Loading checkpoint from {}".format(os.path.join(cfg.resume, fname)))
checkpoint = torch.load(os.path.join(cfg.resume, fname))
feature_extractor_weights = strip_prefix_if_present(checkpoint['feature_extractor'], 'module.')
feature_extractor.load_state_dict(feature_extractor_weights)
classifier_weights = strip_prefix_if_present(checkpoint['classifier'], 'module.')
classifier.load_state_dict(classifier_weights)
feature_extractor.eval()
classifier.eval()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
torch.cuda.empty_cache()
dataset_name = cfg.DATASETS.TEST
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name, fname.replace('.pth', ''))
mkdir(output_folder)
for batch in tqdm(test_loader):
x, y, name = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).long()
pred = inference(feature_extractor, classifier, x, y, flip=False)
output = pred.max(1)[1]
intersection, union, target = intersectionAndUnionGPU(output, y, cfg.MODEL.NUM_CLASSES,
cfg.INPUT.IGNORE_LABEL)
intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()
intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
# save prediction map
pred = pred.cpu().numpy().squeeze().argmax(0)
if 'synthia' in cfg.DATASETS.SOURCE_TRAIN:
pred = transform_color(pred)
mask = get_color_pallete(pred, "city")
mask_filename = name[0].split("/")[1]
mask.save(os.path.join(output_folder, mask_filename))
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
iter_num = int(re.findall(r'\d+', fname)[0])
rec = {'iters': iter_num, 'mIoU': mIoU}
logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
for i in range(cfg.MODEL.NUM_CLASSES):
rec[test_data.trainid2name[i]] = iou_class[i]
logger.info('{} {} iou/accuracy: {:.4f}/{:.4f}.'.format(i, test_data.trainid2name[i], iou_class[i],
accuracy_class[i]))
test_stats.append(rec)
if mIoU > best_miou:
best_iter = iter_num
best_miou = mIoU
best_checkpoint = checkpoint
logger.info('Best result is got at iters {} with mIoU {:.4f}.'.format(best_iter, best_miou))
with open(os.path.join(cfg.resume, 'test_results.csv'), 'w') as handle:
for i, rec in enumerate(test_stats):
if i == 0:
handle.write(','.join(list(rec.keys())) + '\n')
line = [str(rec[key]) for key in rec.keys()]
handle.write(','.join(line) + '\n')
torch.save(best_checkpoint,
os.path.join(cfg.resume, 'model_best.pth'))
def main():
parser = argparse.ArgumentParser(description="PyTorch Semantic Segmentation Testing")
parser.add_argument("-cfg",
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
torch.backends.cudnn.benchmark = True
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("ICCV2021", save_dir, 0)
logger.info(cfg)
logger.info("Loaded configuration file {}".format(args.config_file))
logger.info("Running with config:\n{}".format(cfg))
if os.path.isdir(cfg.resume):
test_all(cfg)
else:
test(cfg)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from __future__ import annotations
import asyncio
import json
import logging
import sys
from datetime import datetime, timedelta
from typing import Tuple, Union, List
from urllib.parse import quote
import aiohttp
from aiohttp import ClientSession, ClientResponseError
from bs4 import BeautifulSoup
from furl import furl
from imow.common.actions import IMowActions
from imow.common.consts import IMOW_OAUTH_URI, IMOW_API_URI
from imow.common.exceptions import (
LoginError,
ApiMaintenanceError,
LanguageNotFoundError,
)
from imow.common.messages import Messages
from imow.common.mowerstate import MowerState
from imow.common.package_descriptions import *
logger = logging.getLogger("imow")
try:
assert sys.version_info >= (int(python_major), int(python_minor))
except AssertionError:
raise RuntimeError(
f"{package_name!r} requires Python {python_major}.{python_minor}+ (You have Python {sys.version})"
)
if (
sys.version_info[0] == 3
and sys.version_info[1] >= 8
and sys.platform.startswith("win")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
class IMowApi:
def __init__(
self,
email: str = None,
password: str = None,
token: str = None,
aiohttp_session: ClientSession = None,
lang: str = "en",
) -> None:
self.http_session: ClientSession = aiohttp_session
self.csrf_token: str = ""
self.requestId: str = ""
self.access_token: str = token
self.token_expires: datetime = None
self.api_email: str = email
self.api_password: str = password
self.lang = lang
self.messages_user = None
self.messages_en = None
async def close(self):
"""Cleanup the aiohttp Session"""
await self.http_session.close()
async def check_api_maintenance(self) -> None:
url = "https://app-api-maintenance-r-euwe-4bf2d8.azurewebsites.net/maintenance/"
headers = {
"Authorization": "",
}
response = await self.api_request(url, "GET", headers=headers)
status = json.loads(await response.text())
logger.debug(status)
if status["serverDisrupted"] or status["serverDown"]:
msg = (
f"iMow API is under Maintenance -> "
f'serverDisrupted: {status["serverDisrupted"]}, serverDown: {status["serverDown"]}, '
f'affectedTill {status["affectedTill"]}'
)
await self.http_session.close()
raise ApiMaintenanceError(msg)
async def get_token(
self,
email: str = "",
password: str = "",
force_reauth=False,
return_expire_time=False,
) -> Union[Tuple[str, datetime], str]:
"""
look for a token, if present, return. Else authenticate and store new token
:param return_expire_time:
:param email: stihl webapp login email non-url-encoded
:param password: stihl webapp login password
:param force_reauth: Force a re-authentication with username and password
:return: tuple, the access token and a datetime object containing the expire date
"""
if not self.access_token or force_reauth:
if email and password:
self.api_password = password
self.api_email = email
if force_reauth:
self.http_session = None
self.csrf_token = None
self.requestId = None
self.access_token: str = ""
self.token_expires: datetime = None
if not self.api_email and not self.api_password:
raise LoginError(
"Got no credentials to authenticate, please provide"
)
await self.__authenticate(self.api_email, self.api_password)
logger.debug("Get Token: Re-Authenticate")
await self.validate_token()
if return_expire_time:
return self.access_token, self.token_expires
else:
return self.access_token
async def validate_token(self, explicit_token: str = None) -> bool:
old_token = None
if explicit_token:
# save old instance token and place temp token for validation
old_token = self.access_token
self.access_token = explicit_token
await self.receive_mowers()
if explicit_token:
# Reset instance token
self.access_token = old_token
return True
    async def __authenticate(
        self, email: str, password: str
    ) -> Tuple[str, datetime, aiohttp.ClientResponse]:
"""
try the authentication request with fetched csrf and requestId payload
:param email: stihl webapp login email non-url-encoded
:param password: stihl webapp login password
:return: the newly created access token, and expire time besides the legacy response
"""
await self.__fetch_new_csrf_token_and_request_id()
url = f"{IMOW_OAUTH_URI}/authentication/authenticate/?lang={self.lang}"
encoded_mail = quote(email)
encoded_password = quote(password)
        payload = (
            f"mail={encoded_mail}&password={encoded_password}"
            f"&csrf-token={self.csrf_token}&requestId={self.requestId}"
        )
headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
response = await self.api_request(
url, "POST", payload=payload, headers=headers
)
response_url_query_args = furl(response.real_url).fragment.args
if "access_token" not in response_url_query_args:
raise LoginError(
"STIHL iMow did not return an access_token, check your credentials"
)
self.access_token = response_url_query_args["access_token"]
self.token_expires = datetime.now() + timedelta(
seconds=int(response_url_query_args["expires_in"])
)
return self.access_token, self.token_expires, response
    async def __fetch_new_csrf_token_and_request_id(self) -> Tuple[str, str]:
"""
Fetch a new csrf_token and requestId to do the authentication as expected by the api
csrf_token and requestId are used as payload within authentication
"""
# URL needs whole redirect query parameter
url = (
f"{IMOW_OAUTH_URI}/authentication/?lang=de_DE&authorizationRedirectUrl=https%3A%2F%2Foauth2"
".imow.stihl.com%2Fauthorization%2F%3Fresponse_type%3Dtoken%26client_id%3D9526273B-1477-47C6-801C"
"-4356F58EF883%26redirect_uri%3Dhttps%253A%252F%252Fapp.imow.stihl.com%252F%2523%252Fauthorize%26state"
)
response = await self.api_request(url, "GET")
soup = BeautifulSoup(await response.text(), "html.parser")
try:
upstream_csrf_token = soup.find(
"input", {"name": "csrf-token"}
).get("value")
upstream_request_id = soup.find(
"input", {"name": "requestId"}
).get("value")
except AttributeError:
            raise ProcessLookupError(
                "Did not find the necessary csrf token and/or request id in html source"
            )
self.csrf_token = upstream_csrf_token
self.requestId = upstream_request_id
logger.debug("CSRF: new token and request id <Redacted>")
return self.csrf_token, self.requestId
async def fetch_messages(self):
try:
            url_en = (
                "https://app.imow.stihl.com/assets/i18n/animations/en.json"
            )
response_en = await self.http_session.request("GET", url_en)
i18n_en = json.loads(await response_en.text())
self.messages_en = Messages(i18n_en)
if self.lang != "en":
url_user = f"https://app.imow.stihl.com/assets/i18n/animations/{self.lang}.json"
response_user = await self.http_session.request(
"GET", url_user
)
i18n_user = json.loads(await response_user.text())
self.messages_user = Messages(i18n_user)
else:
self.messages_user = self.messages_en
except ClientResponseError as e:
if e.status == 404:
await self.close()
raise LanguageNotFoundError(
f"Language-File '{self.lang}.json' not found on imow upstream ("
f"https://app.imow.stihl.com/assets/i18n/animations/{self.lang}.json)"
)
async def api_request(
self, url, method, payload=None, headers=None
) -> aiohttp.ClientResponse:
"""
Do a standardized request against the stihl imow webapi, with predefined headers
:param url: The target URL
:param method: The Method to use
:param payload: optional payload
:param headers: optional update headers
:return: the aiohttp.ClientResponse
"""
if not self.http_session or self.http_session.closed:
self.http_session = aiohttp.ClientSession(raise_for_status=True)
if not self.messages_en:
await self.fetch_messages()
if (
self.token_expires
and (self.token_expires - datetime.now()).days <= 1
):
logger.info(
"Fetching new access_token because old one expires in less than 1 day"
)
await self.get_token(force_reauth=True)
if not payload:
payload = {}
headers_obj = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Authorization": f'Bearer {self.access_token if self.access_token else ""}',
"Origin": "https://app.imow.stihl.com",
"DNT": "1",
"Connection": "keep-alive",
"Referer": "https://app.imow.stihl.com/",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "Trailers",
"Content-Type": "application/json",
}
if headers:
headers_obj.update(headers)
try:
payload_ = await self.http_session.request(
method, url, headers=headers_obj, data=payload
)
payload_.raise_for_status()
return payload_
except ClientResponseError as e:
if e.status == 500:
await self.check_api_maintenance()
raise e
    async def intent(
        self,
        imow_action: IMowActions,
        mower_name: str = "",
        mower_id: str = "",
        mower_external_id: str = "",
        startpoint: str = "0",
        duration: str = "30",
    ) -> aiohttp.ClientResponse:
"""
Intent to do a action. This seems to create a job object upstream. The action object contains an action Enum,
the action Value is <MowerExternalId> or <MowerExternalId,DurationInMunitesDividedBy10,StartPoint> if
startMowing is chosen
:param imow_action: Anything from imow.common.actions
:param mower_name: sth to identify which mower is used
:param mower_id: sth to identify which mower is used
:param mower_external_id:
necessary identifier for the mowers for actions.
This is looked up, if only mower_name or mower_id is provided
:param startpoint: point from which the mowing shall start, default to 0
:param duration: minutes of intended mowing defaults, to 30 minutes
:return:
"""
if not mower_external_id and not mower_id and not mower_name:
raise AttributeError(
"Need some mower to work on. Please specify mower_[name|id|action_id]"
)
if not mower_external_id and mower_name:
mower_external_id = await self.get_mower_action_id_from_name(
mower_name
)
if not mower_external_id and mower_id:
mower_external_id = await self.get_mower_action_id_from_id(
mower_id
)
        if len(mower_external_id) != 16:
            raise AttributeError(
                f"Invalid mower_action_id, need exactly 16 chars, got {len(mower_external_id)} in {mower_external_id}"
            )
url = f"{IMOW_API_URI}/mower-actions/"
# Check if the user provides a timestamp as duration. We need to pass this plain if so (starttime)
first_action_value_appendix = (
f", {duration if '-' in duration else str(int(duration) / 10)}"
)
if "-" in duration and startpoint == "0":
second_action_value_appendix = ""
else:
second_action_value_appendix = f", {str(startpoint)}"
action_value = (
f"{mower_external_id}{first_action_value_appendix}{second_action_value_appendix}"
if imow_action == IMowActions.START_MOWING
else mower_external_id
)
action_object = {
"actionName": imow_action.value,
"actionValue": action_value
# "0000000123456789,15,0" <MowerExternalId,DurationInMunitesDividedBy10,StartPoint>
# "0000000123456789,15,0" <MowerExternalId,StartTime,EndTime>
}
logger.debug(f"Intend: {action_object}")
payload = json.dumps(action_object)
response = await self.api_request(url, "POST", payload=payload)
logger.debug(f"Sent mower {mower_external_id} to {imow_action}")
return response
async def update_setting(self, mower_id, setting, new_value) -> MowerState:
mower_state = await self.receive_mower_by_id(mower_id)
payload_fields = {
"id": mower_state.id,
"unitFormat": mower_state.unitFormat,
"name": mower_state.name,
"teamable": mower_state.teamable,
"accountId": mower_state.accountId,
"childLock": mower_state.childLock,
"corridorMode": mower_state.corridorMode,
"mappingIntelligentHomeDrive": mower_state.mappingIntelligentHomeDrive,
"rainSensorMode": mower_state.rainSensorMode,
"edgeMowingMode": mower_state.edgeMowingMode,
"asmEnabled": mower_state.asmEnabled,
"gpsProtectionEnabled": mower_state.gpsProtectionEnabled,
"automaticModeEnabled": mower_state.automaticModeEnabled,
"localTimezoneOffset": mower_state.localTimezoneOffset,
"mowingTimeManual": None,
"mowingTime": None,
"team": mower_state.team,
"timeZone": mower_state.timeZone,
}
if payload_fields[setting] != new_value:
payload_fields[setting] = new_value
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Content-Type": "application/json",
"Origin": "https://app.imow.stihl.com",
"Connection": "keep-alive",
"Referer": "https://app.imow.stihl.com/",
"TE": "Trailers",
}
response = await self.api_request(
url=f"{IMOW_API_URI}/mowers/{mower_state.id}/",
method="PUT",
payload=json.dumps(payload_fields, indent=2).encode("utf-8"),
headers=headers,
)
mower_state.replace_state(json.loads(await response.text()))
return mower_state
else:
logger.info(f"{setting} is already {new_value}.")
return await self.receive_mower_by_id(mower_id)
async def get_status_by_name(self, mower_name: str) -> dict:
logger.debug(f"get_status_by_name: {mower_name}")
for mower in await self.receive_mowers():
if mower.name == mower_name:
return mower.status
raise LookupError(
f"Mower with name {mower_name} not found in upstream"
)
    async def get_status_by_id(self, mower_id: Union[str, int]) -> dict:
        if not isinstance(mower_id, str):
            mower_id = str(mower_id)
logger.debug(f"get_status_by_id: {mower_id}")
try:
response = await self.receive_mower_by_id(mower_id)
return response.status
except ConnectionError:
raise LookupError(
f"Mower with id {mower_id} not found in upstream"
)
async def get_status_by_action_id(self, mower_action_id: str) -> dict:
logger.debug(f"get_status_by_action_id: {mower_action_id}")
for mower in await self.receive_mowers():
if mower.externalId == mower_action_id:
return mower.status
raise LookupError(
f"Mower with externalId {mower_action_id} not found in upstream"
)
async def get_mower_action_id_from_name(self, mower_name: str) -> str:
logger.debug(f"get_mower_action_id_from_name: {mower_name}")
for mower in await self.receive_mowers():
if mower.name == mower_name:
return mower.externalId
raise LookupError(
f"Mower with name {mower_name} not found in upstream"
)
async def get_mower_action_id_from_id(self, mower_id: str) -> str:
logger.debug(f"get_mower_action_id_from_id: {mower_id}")
try:
response = await self.receive_mower_by_id(mower_id)
return response.externalId
except ConnectionError:
raise LookupError(
f"Mower with id {mower_id} not found in upstream"
)
async def get_mower_id_from_name(self, mower_name: str) -> str:
logger.debug(f"get_mower_id_from_name: {mower_name}")
for mower in await self.receive_mowers():
if mower.name == mower_name:
return mower.id
raise LookupError(
f"Mower with name {mower_name} not found in upstream"
)
async def receive_mowers(self) -> List[MowerState]:
logger.debug(f"receive_mowers:")
mowers = []
response = await self.api_request(f"{IMOW_API_URI}/mowers/", "GET")
for mower in json.loads(await response.text()):
mowers.append(MowerState(mower, self))
logger.debug(mowers)
return mowers
async def receive_mower_by_name(self, mower_name: str) -> MowerState:
logger.debug(f"get_mower_from_name: {mower_name}")
for mower in await self.receive_mowers():
if mower.name == mower_name:
logger.debug(mower)
return mower
raise LookupError(
f"Mower with name {mower_name} not found in upstream"
)
async def receive_mower_by_id(self, mower_id: str) -> MowerState:
logger.debug(f"receive_mower: {mower_id}")
response = await self.api_request(
f"{IMOW_API_URI}/mowers/{mower_id}/", "GET"
)
mower = MowerState(json.loads(await response.text()), self)
logger.debug(mower)
return mower
async def receive_mower_statistics(self, mower_id: str) -> dict:
logger.debug(f"receive_mower_statistics: {mower_id}")
response = await self.api_request(
f"{IMOW_API_URI}/mowers/{mower_id}/statistic/", "GET"
)
stats = json.loads(await response.text())
logger.debug(stats)
return stats
async def receive_mower_week_mow_time_in_hours(
self, mower_id: str
) -> dict:
logger.debug(f"receive_mower_week_mow_time_in_hours: {mower_id}")
response = await self.api_request(
f"{IMOW_API_URI}/mowers/{mower_id}/statistics/week-mow-time-in-hours/",
"GET",
)
mow_times = json.loads(await response.text())
logger.debug(mow_times)
return mow_times
async def receive_mower_start_points(self, mower_id: str) -> dict:
logger.debug(f"receive_mower_start_points: {mower_id}")
response = await self.api_request(
f"{IMOW_API_URI}/mowers/{mower_id}/start-points/", "GET"
)
start_points = json.loads(await response.text())
logger.debug(start_points)
return start_points
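
# A minimal usage sketch (illustration only, not part of the library; credentials are placeholders):
#
#   async def demo():
#       api = IMowApi(email="user@example.com", password="secret", lang="en")
#       mowers = await api.receive_mowers()
#       await api.intent(IMowActions.START_MOWING, mower_id=mowers[0].id, duration="30")
#       await api.close()
#
#   asyncio.run(demo())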
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
# # Workshop Notebook
# ## Notebook Introduction
# ### How to Use this Notebook
# ### References
# I know it is traditional to have the references at the end of books, but when you are standing on the shoulders of giants, you thank them first.
# ```{bibliography}
# ```
# ### Thank you!
#
# Also, a huge *thank you* to Adam Lavely (https://github.com/adamlavely) for developing some of the initial notebooks!
# ## Introduction to JupyterLab
# ### Where am I? (JupyterLab Notebook)
# Jupyter is a powerful suite of tools that allows us to do many things.
#
# Jupyter is capable of running **Ju**lia, **Pyt**hon and **R**, as well as some other things.
#
# ### Cells
# Each box is called a cell.
# #### Two types of Cells
# ##### Text
# Text Cells allow you to add text (via Markdown), which includes tables, images, links, bullet lists, numbered lists, LaTeX, and blockquotes, among other things.
# ###### Table
#
# ```markdown
# | This | is |
# |------|------|
# | a | table|
# ```
#
# | This | is |
# |------|------|
# | a | table|
# ###### Image
# ```markdown
# 
# ```
#
# 
#
#
#
#
# ###### Link
# ```markdown
# [Attribution](https://www.metmuseum.org/art/collection/search/436535)
# ```
# Vincent van Gogh / Public domain
# The Metropolitan Museum of Art, New York - Purchase, The Annenberg Foundation Gift, 1993 -
# [Attribution](https://www.metmuseum.org/art/collection/search/436535)
# ###### Bullet List
# ```markdown
# * I am a
# * bullet
# * list
# ```
# * I am a
# * bullet
# * list
#
#
# ###### Numbered List
# ```markdown
# 1. I am a
# 1. numbered
# 1. list
# ```
# 1. I am a
# 1. numbered
# 1. list
# ###### LaTeX
# ```markdown
# $$e=mc^2$$
# ```
#
#
# $$e=mc^2$$
# ###### Blockquotes
# ```markdown
# > This is a blockquote.
# ```
# > This is a blockquote.
# ##### Code
# Cells can be run using the Run button ► or selecting one of the run options under the Run menu.
#
# Try this out! You can change what is in the cell and rerun the same cell, which is useful for debugging.
# In[1]:
2 + 2
# ### Your turn!
# In a new cell, figure out what **5315 + 5618** is.
# In[2]:
## remove and type out 5315 + 5618
## then hit the play button
# ## Introduction to Python
# In this section, I wanted to introduce a few basic concepts and give an outline of this section.
# ### Comments in Python
# In Python, we can create comments in the code itself. Since we can use Markdown (as you see here 😁), we won't use this too much in this notebook. Though, here is an example.
#
# Basically, you use the... umm... hashtag? Number sign? Pound sign?
#
# This thing -> #
# In[3]:
# I am a comment in Python
# Here is 2 + 2
2 + 2
# As you can see, these are not "computed" using Python.
# We are just comments for the person looking at this.
# Or... you!
# ### Print Function
# We will be using...
#
# ```python
# print()
# ```
#
# ...several times in this notebook.
#
# *print()* is a function to print out strings, variables, numbers, functions, etc.
#
# Let's use the classic example.
# In[4]:
print( "hello, world!" )
# OR
# In[5]:
print("hello, world!")
# *print()* can do some fun things as well. As in, giving it more than one thing to print, with commas between them. This will print the items separated by spaces.
# In[6]:
print( "hello,", "world!" )
# ### Help Function
# The...
#
# ```python
# help()
# ```
#
# ... function is exactly what it is. It is a function to 🌟 help 🌟 you understand the basic usage of another function.
# In[7]:
help(print)
# ### Resources
# Highly suggest looking for answers using [StackOverflow](https://stackoverflow.com/help/searching)
# ### Common Errors
# One of the most common errors in Python is the dreaded
#
# ```python
# 2 + 2
# 3 + 3
#
# File "<ipython-input-1-0dcc020fd5cb>", line 2
# 3 + 3
# ^
# IndentationError: unexpected indent
# ```
#
# Why does this occur? Well, because Python uses spacing or tabs to distinguish where things like loops, functions, and if/else statements start and end. So, if you add an extra space or tab at the beginning of the statement, you will see this message. If you do, check your spacing.
# ```{note}
# Python can get weird with this issue, as you can, technically, start code wherever you like as long as you are consistent. The next cell shows an example of this... oddity.
#
# ```
# In[8]:
2+2
3+3
# ### Your turn!
# ## Learning about Variables
# When we are developing our idea, we sometimes need to use values multiple times or change the value based on our code. This concept is where variables become very helpful. Let's look at an example.
#
# In this example, we are adding a few numbers together. In this instance, if all we care about is getting the result (similar to a calculator), then variables are not needed.
# In[9]:
5 + 3 + 16
# But let's look at an example where we need to get the circumference of a circle using multiple radii. The equation for the circumference of a circle is: $C = 2 \pi r$
# Let's say the radius is 5
# In[10]:
2 * 3.14159265359 * 5
# OK, how about radius 10 and 11 and 4 and ...
# Well, in this example, we might not want to rewrite 3.14159265359 over and over. So, in this case, we want to create a variable for this, and we will call it pi.
# In[11]:
pi = 3.14159265359
# Now, every time we reference the variable called **pi** it will refer to the number **3.14159265359**
#
# Let's try those radii again (10, 11, 4)
# In[12]:
2 * pi * 10
# In[13]:
2 * pi * 11
# In[14]:
2 * pi * 4
# By the way, if you happen to get an error:
# ```javascript
# NameError: name 'pi' is not defined
# ```
# Make sure you go to the cell that has
# ```python
# pi = 3.14159265359
# ```
# and run this cell *first* then try the other calculations.
# ### Type of Variables
# There are multiple types of variables. The most common (and the ones we will talk about) are:
#
# * Integers (whole numbers)
# * Float (Floating points or numbers with a decimal)
# * Text
# * Lists
# * Dictionaries
#
# The nice thing about Python is that we do **not** need to specify (or declare) which type we are using. Python will figure this out for us!
#
# BUT FIRST, a quick detour...
#
# We need to talk about Camel Casing.
# #### Camel Case
# <img src="https://upload.wikimedia.org/wikipedia/commons/c/c8/CamelCase_new.svg" alt="camel case" width="100" style="float:right"/>
# Variable names must be one continuous string of letters/numbers. So, let's say we wanted to create a variable called "number of kittens." Instead of calling this variable <em>number of kittens</em>, I would call it <em>numberOfKittens</em>. Why the capitalization? Because it makes it easier to separate the words in the name. As in, <em>numberofkittens</em> vs. <em>numberOfKittens</em>. We have a fun name for this: camel case.
# <cite>File:CamelCase new.svg. (2020, April 15). Wikimedia Commons, the free media repository. Retrieved 15:25, June 3, 2020 from https://commons.wikimedia.org/w/index.php?title=File:CamelCase_new.svg&oldid=411544943.</cite>
# #### Integers or int
# As mentioned, integers are whole numbers. Let's create an example. How about we use our numberOfKittens. We will then set this value to 0. As in, we have 0 kittens.
# In[15]:
numberOfKittens = 0
# One thing we might want to do is to have Python tell us what **type** this variable is. Well, Python has a function for this called
#
# ```python
# type()
# ```
# In[16]:
type( numberOfKittens )
# So this checks out, we made an int, and it is showing us we have an int.
#
# Now, once we have a variable, it is not static. We can change the value as much as we need to. Running the next cell will continually add 10 to our original variable.
#
# Try running this a few times.
# In[17]:
numberOfKittens = numberOfKittens + 10
numberOfKittens
# #### Floating points or floats
# Floats are similar to integers, but with more precision.
# Float comes from a Floating point or a number with a decimal point.
#
# This example starts at 0, but note that this is *.0*.
# Adding the decimal tells Python that we should have a float value instead of an integer.
# In[18]:
aFloatVariable = .0
# Let's again, check the variable type.
# In[19]:
type( aFloatVariable )
# Looks good.
#
# And again, we will add 10 to this. There is something interesting here; see if you can spot it.
# aFloatVariable = aFloatVariable + 10
# aFloatVariable
# If you guessed "mixing a float and an integer," you got it. Let's see an example.
# ##### Mixing integers and floats
# In Python (3, more specifically), the result will always take the form with the most precision. So, by default, a float.
# In[20]:
letsSeeWhatHappens = numberOfKittens + aFloatVariable
letsSeeWhatHappens
# We can force variables to be a certain type. We call this a 'type-cast', which can be used to:
#
# * make an integer into a float
# * a float to an integer
# * an integer to a string (we have not discussed this yet)
# * a float to a string (we have not discussed this yet)
# * etc...
# ##### type-cast
# ```{note}
# A type-cast is temporary. Unless you assign the result to a variable, the original variable keeps its original type.
# ```
# Let's switch our numberOfKittens to a float using
# ```python
# float()
# ```
#
# and turn our aFloatVariable to an integer using
#
# ```python
# int()
# ```
# In[21]:
float(numberOfKittens)
# In[22]:
int(aFloatVariable)
# #### String or str
# So, up to this point, we started our conversation working with numbers. Well, what about the other things that are not numbers... like text? Well, for text, we use something called a String or str.
#
# Strings allow us to capture a single character up to thousands of characters (actually, much more than this). Let's go through a traditional example of "Hello, World!" but with my slight spin to it.
# In[23]:
helloStatement = "Hello, everyone!"
# As you can see, strings can capture text and other alphanumeric and special characters. There are several unique functions for strings, but first, let's double-check and see what type we get from our helloStatement.
# In[24]:
type( helloStatement )
# Not too surprising, we see this is type str or string.
# ##### String Indexing/String Slicing
# One of the first ways to interact with our string is to take a look at individual characters by using their **index**.
#
# The **index** is the position (or multiple positions) for each character in the string. So, if we look at our string, we have Hello, everyone! If we wanted to see the first letter *H*, we could reference this using the index, or the position where the letter is in the string.
# In[25]:
helloStatement[1]
# ohh.. wait a minute. We were expecting the letter *H*, but we got *e*. What happened?
# ```{note}
# For indexes, we always start at the number 0. So, 0 is the first thing, 1 is the second thing, and so on.
# ```
# Let's try this again.
# In[26]:
helloStatement[0]
# There we go!
# Visually, this is how the string looks to Python.
#
# 
# ###### Indexing Multiple Letters
# In[27]:
print( helloStatement[0:5] )
# Wait a second!
#
# 
# The way you should think of this is:
#
# ```python
# helloStatement[0 : 5 - 1]
# helloStatement[(starting number) to (ending number - 1)]
# ```
#
# There is also a shortcut way of writing this, without the 0.
# In[28]:
print( helloStatement[:5] )
# In[29]:
print( helloStatement[5:] )
# ##### String functions
# ###### Formatting
# In[30]:
print( helloStatement.capitalize() )
print( helloStatement.lower() )
# ###### Split
# In[31]:
print( helloStatement.split(" ") )
# ```{note}
# *.split()* will eventually become your best friend. *.split()* is a **great** function to use when working with uniformly delimited data,
# such as comma separated values or CSV.
# ```
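# For instance, a minimal sketch of *.split()* on a made-up CSV row:
#
# ```python
# row = "apple,banana,eggs"
# print( row.split(",") )   # ['apple', 'banana', 'eggs']
# ```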
# ##### Concatenating Strings
#
# When you want to put two strings together, we say you *concatenate* the strings. There are multiple ways of doing this, but presented here are what I believe to be the three most common ways.
# ###### + Method
# This is the most straightforward method of the three, but there can be some issues. You simply add a plus sign *+* between your strings. Let's take a look at this.
# In[32]:
print ( "hello, " + "everyone!")
# This works fine, but when we add a number to this idea, we run into issues.
# ```python
# print ( "hello, " + "every" + 1 + "!")
#
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-41-1f53f06cad5c> in <module>
# ----> 1 print ( "hello, " + "every" + 1 + "!")
#
# TypeError: can only concatenate str (not "int") to str
# ```
# In this case we need to *type-cast* the integer as a string using
# ```python
# str()
# ```
# In[33]:
print ( "hello, " + "every" + str(1) + "!")
# ###### % Method
# This is my favorite method out of the three. Let's see how this works with the same example.
#
# In this case, we use a %s (s = string) for each string we want to embed in our overall string.
# In[34]:
print ( "%s, %s" % ("hello", "everyone") )
# There are three parts to this.
#
# *The format*
# * ```python
# "%s, %s"
# ```
#
# *The break*
# * ```python
# %
# ```
#
# *The fill*
# * ```python
# ("hello", "everyone")
# ```
#
# We have two %s, meaning we need to feed it with two strings.
# OK, but what about numbers?
# In[35]:
print ( "%s, %s%s%s" % ("hello","every",1,"!") )
# Still works! This is why I like this method. You pick the formatting and feed in the strings.
# ###### join() Method
# The .join() method uses a function called
# ```python
# .join()
# ```
# This is a great function to be aware of, as it allows you to join strings with a specific, static format. What do I mean by static formatting? Well, unlike the % method, which can be formatted exactly how I want it, the .join() method requires a specific pattern. Example time!
# In[36]:
print ( " ".join(["hello, ", "everyone!"]) )
# There are two parts to this.
#
# *The splitter*
# * ```python
# " "
# ```
#
# *The fill*
# * ```python
# .join(["hello, ", "everyone!"])
# ```
#
# Notice that the join has the brackets around it. Technically, you are feeding this an array or list (we have not talked about this yet). This function again, like *.split()*, will be a great asset to you in the future.
#
# Let's show this with our number again.
# ```python
# print ( " ".join(["hello, ", "every", 1, "!"]) )
#
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-54-e926f0c4c025> in <module>
# ----> 1 print ( " ".join(["hello, ", "every", 1, "!"]) )
#
# TypeError: sequence item 2: expected str instance, int found
# ```
# The same issue as before, we need to type-cast.
# In[37]:
print ( " ".join(["hello, ", "every", str(1), "!"]) )
# Notice the spaces? Again, we are saying with *the splitter* what each string is going to be separated by, so in this case, everything will be split by spaces.
# #### Booleans
# Booleans are used to do comparisons: (True/False), (1/0), (yes/no).
# In[38]:
someCondition = True
type( someCondition )
# ##### Boolean Logic
# We will talk about boolean logic more in the next section (Comparisons)
# In[39]:
(someCondition == False)
# In[40]:
if (False):
print( "yes for False!" )
if (True):
print( "yes for True!" )
# ```{note}
# A more "traditional" way to do booleans is to use 0 and 1. In Python, any number other than 0 is True. Including negative numbers and decimals.
# ```
# In[41]:
if (0):
print( "yes for 0!" )
if (1):
print( "yes for 1!" )
if (2):
print( "yes for 2!" )
if (-3):
print( "yes for -3!" )
if (.4):
print( "yes for .4!" )
# ### Lists
# Lists (also known as arrays) are exactly that: a list of data.
#
# There are two options for creating a *List*.
#
# 1. Define the list initially
# In[42]:
groceryList = ["apple", "banana", "eggs"]
print( groceryList )
# 2. Create a list and add to it using
#
# ```python
# .append()
# ```
# In[43]:
groceryList = []
groceryList.append("apple")
groceryList.append("banana")
groceryList.append("eggs")
print( groceryList )
# ```{note}
# For indexes, we always start at the number 0. So, 0 is the first thing, 1 is the second thing, and so on.
# ```
# In[44]:
print( groceryList[2] )
print( groceryList[0] )
print( groceryList[1] )
# So what happens if we use an *index* outside of our list?
# ```python
# print( groceryList[3] )
#
# ---------------------------------------------------------------------------
# IndexError Traceback (most recent call last)
# <ipython-input-44-0a77fb05d512> in <module>
# print( groceryList[3] )
#
# IndexError: list index out of range
# ```
# ```{note}
# Typically, going through an array one index at a time is not how we want to use lists.
# We will talk about going through lists using a *loop* in an upcoming notebook.
# ```
# #### Dictionary
# Dictionaries are used to index based on a specific key. As in:
#
# dictionary[\"street adddress\" (key)] = "123 Apple St." (value)
# In[45]:
personalInformation = {}
personalInformation["streetAddress"] = "123 Apple St."
personalInformation["firstName"] = "Patrick"
personalInformation["lastName"] = "Dudas"
print( personalInformation )
# Note the order.
# Again, to do this more efficiently, we will be using loops (which we will talk about later).
# ### Your turn!
# ## Comparison Operators
# We need to be able to compare different variables. We will be working on:
# * Are these things the same?
# * Are these things not the same?
# * How do these things compare?
#
# We can compare any data type, and our output will be a boolean (True or False). The other things we will cover are:
# * Comparing different data types
# * Making multiple comparisons at once
#
# Comparison operators are important on their own (how do these things compare?) and are also useful for sorting and switching (see the next notebook).
# ### Are these things the same?
# #### Numeric Comparisons
# We have already initialized variables by setting something equal to something else - let's do that here by setting kitten 🐈 equal to 10 and then setting dog 🐕 equal to kitten 🐈. Finally, 🐝 bee will be equal to 11.
#
# So...
#
# 🐈 = 10
#
# 🐕 = 🐈
#
# 🐝 = 11
# In[46]:
kitten = 10
dog = kitten
bee = 11
print( "kitten =", kitten, "; dog =", dog, "; bee = ", bee )
# The first comparison operator is '==', which tests to see if two variables are equal.
# In[47]:
print( "kitten =", kitten, "; dog =", dog, "; bee = ", bee )
print( "Is kitten equal to dog?")
print( kitten == dog )
print( "Is kitten equal to bee?")
print( kitten == bee )
# This tells us that kitten is equal to dog, because it returns *True* and kitten is not equal to bee, as that returns *False*.
# #### Character Comparisons
# We can also do comparisons with other variable types. Here's an example with strings instead of integers.
#
# Let's think about some foods, how about:
#
# - food1 = 🍎
# - food2 = 🍪
# - food3 = 🍎
# In[48]:
food1 = 'apple'
food2 = 'cookie'
food3 = 'apple'
print( "food1=", food1,"; food2 =", food2,"; food3 = ", food3 )
print( "Is food1 equal to food2?")
print( food1 == food2 )
print( "Is food1 equal to food3?")
print( food1 == food3 )
# ### Are these things different?
# #### This is Logical... NOT!
# We can also test to see if two values are not equal using the '!=' operator.
# In[49]:
print( "food1 =", food1,"; food2 =", food2,"; food3 =", food3 )
print( "Is food1 not equal to food2?")
print( food1 != food2 )
print( "Is food1 not equal to food3?")
print( food1 != food3 )
# This gives us the opposite of what we had before.
#
# So, what did we learn?
#
# 🍎 == 🍎 = *True*
#
# 🍎 != 🍪 = *True*
# ### How do these things compare?
# #### Math Comparisons 101
# We can also compare the magnitude of values using '<', '<=', '>' and '>=', which will return 'True' if the condition is being met.
# In[50]:
print( "kitten =", kitten, "; dog =", dog, "; bee = ", bee )
# In[51]:
print( "Is kitten less than dog?")
print( kitten < dog )
print( "Is kitten less than or equal to dog?")
print( kitten <= dog )
print( "Is kitten greater than or equal to dog?")
print( kitten >= dog )
print( "Is kitten greater than dog?")
print( kitten > dog )
# ```{note}
# We do have to watch out for our types. Characters and numerics are **not** the same.
# ```
#
# In[52]:
TheCharacters = "10"
TheNumbers = 10
print( "Is TheNumbers equal to TheCharacters?")
print( TheNumbers == TheCharacters )
print( "TheNumbers type is ", type( TheNumbers ), "; and TheCharacters type is ", type( TheCharacters ) )
# We can compare integers and floats (!) but not other disparate data types.
#
# If you let Python take care of your data-types, be warned that they could be different from what you think they are!
# ```{note}
# variable = variable is **not** the same thing as variable == variable
#
# The first is an assignment; the second is a comparison that will **always** return True
# ```
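# A quick illustration of the difference:
#
# ```python
# kitten = 10    # assignment: stores 10 in kitten
# kitten == 10   # comparison: evaluates to True
# ```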
# ### Multiple Comparisons
# We can make multiple comparisons at once by stringing the statements
# * and
# * not
# * or
#
# together.
#
# The individual testable (true/false) components need to be broken apart. For example,
# * If the *V* CATA bus is coming around the corner, then I need to run towards the bus stop.
#
# requires several things for it to be true and to require running. We can break these things out with:
# * If there is a vehicle coming around the corner **AND** that vehicle is a CATA bus **AND** that CATA bus is a V
# * then I need to run towards the bus stop
#
# We will only run towards the bus stop if all of the statements are true.
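# As a sketch (with made-up variable names), the bus scenario could look like:
#
# ```python
# vehicle_coming = True
# vehicle_is_cata_bus = True
# bus_is_v = False
#
# if vehicle_coming and vehicle_is_cata_bus and bus_is_v:
#     print( "Run towards the bus stop!" )
# ```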
# #### AND
# ```{note}
# the **and** operator will return True if all of the conditions are met
# ```
# Let's create another scenario for this around clothes. For this, let's assume:
#
# face = 😎
#
# shirt = 👕
#
# pants = 👖
#
#
#
#
#
# In[53]:
face = "sunglasses"
shirt = "tshirt"
pants = "jeans"
print ( "Am I wearing sunglasses and jeans?" )
print (face == "sunglasses")
print (pants == "jeans")
print( (face == "sunglasses") and (pants == "jeans") )
print ( "Am I wearing sweater and jeans?" )
print (shirt == "sweater")
print (pants == "jeans")
print( (shirt == "sweater") and (pants == "jeans") )
# We can also string as many comparisons together as we want.
# In[54]:
print( (1 < 2) and (1 < 3) and (1 < 4) and (1 < 5) and (1 < 6) and (1 < 7) and (1 < 8) )
# #### OR
# ```{note}
# the **or** operator will return True if at least *1* of the conditions is met
# ```
# In[55]:
print( "face =", face, "; shirt =", shirt, "; pants = ", pants )
print ( "Am I wearing sunglasses or jeans?" )
print (face == "sunglasses")
print (pants == "jeans")
print( (face == "sunglasses") or (pants == "jeans") )
print ( "Am I wearing sweater or jeans?" )
print (shirt == "sweater")
print (pants == "jeans")
print( (shirt == "sweater") or (pants == "jeans") )
# #### Not
# ```{note}
# the **not** operator will reverse or switch the meaning of the and/or operators
# ```
# In[56]:
print( "face =", face, "; shirt =", shirt, "; pants = ", pants )
print ( "Am I wearing sunglasses and not jeans?" )
print (face == "sunglasses")
print (not (pants == "jeans"))
print( (face == "sunglasses") and not (pants == "jeans") )
print ( "Am I wearing jeans and not a sweater?" )
print (not (shirt == "sweater"))
print (pants == "jeans")
print( not (shirt == "sweater") and (pants == "jeans") )
# ### Your Turn!
# Try to fill in code to fulfill the request! Here are some variables used in the exercise
# In[57]:
dogA_color = 'brown'
dogA_mass = 42
dogA_sex = 'male'
dogA_age = 5
dogA_name = 'chip'
dogB_color = 'white'
dogB_mass = 19
dogB_sex = 'female'
dogB_age = 2
dogB_name = 'lady'
# Is dogA the same color as dogB? (False)
# In[58]:
# Example:
print( dogA_color == dogB_color )
# Does dogA have the same name as dogB? (False)
# In[59]:
# Try it out here:
# Is dogA older than dogB? (True)
# In[60]:
# Try it out here:
# Is dogA the same sex as dogB? (False)
# In[61]:
# Try it out here:
# Is dogA heavier than dogB, and does dogA have a different name than dogB? (True)
# In[62]:
# Try it out here:
# Does dogA have a different age than dogB and not a different sex than dogB? (False)
# In[63]:
# Try it out here:
# ## If-Else Conditions
# We can condition our data using if-else statements. If-else statements allow us to do different things depending on whether a certain criterion is met or not. For example, we could count the odds and evens in a list of numbers.
# ### if
# The *if* statement starts with if and then lists a condition that may or may not be met. If the condition is true, we do what is listed. If it is not, we move on.
#
# Our example here is straightforward; if answer is greater than 30, print something.
# In[64]:
answer = 42
if answer > 30:
print( "This number is greater than 30")
# OK, same concept.
# In[65]:
answer = 42
if answer > 50:
print( "This number is greater than 50")
# ```{note}
# Note the structure of a Python if/else statement: where some languages use { } to denote the start and end of the if/else statement, Python uses spacing.
#
# if (condition): <-colon
#
# <- space or tab
#
# Anything that is also spaced or tab is *part* of the if statement.
#
# ```
# #### Where the if Starts and Ends
# As mentioned in our note, the if/else statement uses spacing to indicate where it starts and ends. To highlight this, let's look at an example.
# In[66]:
print("Into the If/Else!")
if (10 < 2):
print("In the If/Else!")
print("Still in the If/Else!")
print("How do I get out of here!?")
print("Out of the If/Else!")
# ### else
# In these examples, only the numbers that are greater than 30 and 50 will get any response. We can add a response for values that do not meet the conditional statement found within the if using an *else* statement.
# In[67]:
answer = 42
if answer > 30:
print( answer, "> 30")
else:
print( answer, "< 30")
if answer > 50:
print( answer, "> 50")
else:
print( answer, "< 50")
# ### elif (else if)
# If-else statements can also be stacked together to allow for additional sorting using multiple conditions. The way this is done in Python is by using
# ```python
# elif
# ```
#
# This will chain conditions, but once one condition is true, it will stop ✋
#
# Let's take a look at an example.
# In[68]:
favoriteColor = "Yellow"
if (favoriteColor == "Red"):
print ("My favorite color is red.")
elif (favoriteColor == "Orange"):
print ("My favorite color is orange.")
elif (favoriteColor == "Yellow"):
print ("My favorite color is yellow.")
elif (favoriteColor == "Green"):
print ("My favorite color is green.")
elif (favoriteColor == "Blue"):
print ("My favorite color is blue.")
elif (favoriteColor == "Indigo"):
print ("My favorite color is indigo.")
elif (favoriteColor == "Violet"):
print ("My favorite color is violet.")
else:
print ("I don't have a favorite color.")
# ## Loops
# One of the great features of programming is that we have many options for doing the same task multiple times. The three methods we will be looking at are:
# * Functions (later notebook)
# * For loops
# * While Loops
#
# ### For Loops
# Loops allow us to do the same thing to each item in a list or array. One of the most basic types of loops is a *for loop* - this allows us to iterate over any sequence.
#
# We set up a for loop using 2 things:
# * loop variable - the value of the sequence currently being used
# * sequence - the data we iterate over
#
# The sequence can be any list. We set up a *for loop* using the *for* and *in* keywords, a colon, and all of the code within the *for loop* indented.
# In[69]:
exampleList = ['a', 'niner', 6, 6.1, 'V@@@', 1001/2, 42]
print( exampleList )
# Now, earlier we talked about accessing elements in a list or array by their index. Meaning, if we wanted to print this out, we would need to...
# In[70]:
print( exampleList[0] )
print( exampleList[1] )
print( exampleList[2] )
print( exampleList[3] )
print( exampleList[4] )
print( exampleList[5] )
print( exampleList[6] )
# #### Looping Over Values
# Very time consuming and frustrating 😤.
#
# Loops make this sooooooo much easier. There are three parts to a *for loop*.
#
# ```python
#
# for variable_name_we_make_up in our_list_name:
# do_something_with_each_value( variable_name_we_make_up )
#
# ```
#
# As stated, variable_name_we_make_up is something we make up and is used to represent the value as we loop through our, well,... loop.
#
# ```python
# groceryList = ["apple", "banana", "eggs"]
# ```
#
# Remember me?
# In[71]:
groceryList = ["apple", "banana", "eggs"]
for itemInOurList in groceryList:
print (itemInOurList)
# As mentioned, we name the variable. Here is the same idea again.
# In[72]:
groceryList = ["apple", "banana", "eggs"]
for steve in groceryList:
print (steve)
# Going back to our original list. See how much easier it is to print these values?
# In[73]:
for item in exampleList:
print (item)
# #### Looping Over Indices
# Sometimes, it's helpful to iterate using indices. For example, linear algebra heavy calculations will almost always use indices to make working with vectors and matrices easier.
#
# We can use the
# ```python
# len()
# ```
# and
# ```python
# range()
# ```
#
# functions to show the length and create indices. We can then iterate using the index rather than the values. Let's show off these functions.
# In[74]:
groceryList = ["apple", "banana", "eggs"]
print ( len(groceryList) )
# In[75]:
print ( range(3) )
# ```{note}
# *range()* can be a bit misleading. The range is always one less than what you might expect. Meaning, *range(0,3)* goes from 0 to 1 to 2 to... that's it. So when using *range()* think about it as *range(starting number, ending number - 1)*
# ```
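# For example, wrapping *range()* in *list()* shows exactly which values you get:
#
# ```python
# print( list(range(3)) )     # [0, 1, 2]
# print( list(range(0, 3)) )  # [0, 1, 2], the same thing with the start spelled out
# ```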
# In[76]:
for index in range(len(groceryList)):
print("index:",index,"value:",groceryList[index])
# You may have noticed that the second line is indented, like we saw before with if/else statements. This indent is how we indicate what is in the loop. Our loop can have many lines (all indented). The first line that isn't indented indicates we are out of the loop. This indent is the Python syntax for in and out of the loop; other coding languages use other things such as braces {}. Note that blank lines don't matter, just indentation.
# In[77]:
print( "Starting the loop" )
for val in groceryList:
print( "\t", "item:", val )
print( "\t", "Inside the loop" )
print( "Outside the loop" )
# ### While loops
# For loops are used when you have something you are iterating over - you know the length. You can use a while loop if you don't know the number of times something will run. The while loop code requires a conditional statement; the loop will continue to run as long as this is true and will not run again as soon as it is false.
# ##### Conceptual Example
# You can think about taking a test in two different ways.
#
# > Scenario: You are looking through your junk drawer for your sunglasses
#
# For loop:
# ```python
# for item in junk_drawer:
# if (item == "sunglasses"):
# "put them on" 😎
# else:
# "keep looking"
# ```
#
# While loop:
# ```python
# while item != "sunglasses":
# "keep looking"
# item = "some item in the junk drawer"
# "put them on" 😎
# ```
#
# Can you see where each has their unique take on looping? Of course, you don't; you are wearing sunglasses indoors. Take them off first, then check out their uniqueness.
# The condition being set by the while statement will cause this to run as long as the statement is true.
# In[78]:
counting = 0
while (counting < 10):
print ( "before:", counting )
counting = counting + 1
print ("\t","after:",counting)
# One thing to note is that the while loop won't ever be entered if the condition is already false when the loop begins.
# In[79]:
startAtTen = 10
while (startAtTen < 10):
print ( "before:", startAtTen )
counting = counting + 1
print ("\t","after:",startAtTen )
# ###### 😈 A VERY MEAN Example 😈
# Let's see where we can use this type of loop, in this 😈 VERY MEAN Example 😈. We are creating a set of 30 random numbers from 1 to 49. The *while* will run until it hits its first even number and print this out. Can you spot its MEAN intention?
# In[80]:
import random
randomList = [random.randrange(1, 50, 1) for i in range(30)]
print ( randomList[0:5] )
index = 0
print ("start loop")
while ( randomList[index] % 2 ):
index = index + 1
print ( "the first even number is:", randomList[index])
# So why is this very mean?! Look at our warning.
# ```{warning}
# While loops will keep iterating as long as the statement stays true. Infinite loops are caused by a condition that always stays true. Use the stop button ( 🔲 but filled in ) to stop this erroneous code. Here is an example of this type of code.
# ```
# ```python
# counting = 0
#
# while (counting >= 0):
# print ( "This the loop that never ends. Yes, it goes on and on, my friend!" )
# print ( "Some people started looping it not knowing what it was, " )
# print ( "and they'll continue looping it forever just because..." )
# counting = counting + 1
# ```
# This is 😈 A VERY MEAN Example 😈 because it is possible to have a list without a single even number. The odds of picking an even or an odd are a coin flip (50%). Now do this 30 times. What are the odds of flipping a coin 30 times without a single "Tails"?
#
# $\frac{1}{2}$ = 1 coin
#
# $\frac{1}{2} * \frac{1}{2}$ = 2 coins
#
# $\frac{1}{2} * \frac{1}{2} * \frac{1}{2}$ = 3 coins
#
# $(\frac{1}{2})^n$ = n coins
#
# $(\frac{1}{2})^{30}$ = 30 coins = $\frac{1}{1073741824}$ OR one in 1 billion, 73 million, 741 thousand, 824.
#
# Meaning, one person out of 1073741824 will have a while loop that never finds an even number - it runs right off the end of the list and the code fails!
#
# MUAHAHAHA!!!
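# If you want to double-check that arithmetic, python is happy to oblige (a quick sketch using only built-in operators):
# ```python
# print( 2 ** 30 )        # 1073741824
# print( (1 / 2) ** 30 )  # about 9.3e-10, roughly one in a billion
# ```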
# ### Your Turn!
# Try to fill in code to fulfill the request! Here is a variable used in the exercises.
# In[81]:
aListOfNumbers = [6, 3, 4, 5, 7, 8, 9 ]
# Write a function that returns the length of aListOfNumbers as well as the maximum value. Hint: max() is a built-in function
# In[82]:
# Try it here:
# Use a for loop to add up all of the numbers in aListOfNumbers.
# In[83]:
# Try it here:
# Use a while loop to find the first number in aListOfNumbers that is both greater than 5 and a multiple of 4.
# In[84]:
# Try it here:
# Count the number of values in aListOfNumbers that are:
# * even
# * odd and divisible by three
# * odd and not divisible by three
#
# using if, elif and else.
# In[85]:
# Try it here:
# Create a dictionary with keys 1-8 corresponding to the words one, two, three, etc. Loop through aListOfNumbers to print out the word corresponding to each digit, providing a default value of 'Not Found' if the key is not contained within the dictionary. You should get: six three four five seven eight Not Found
# In[86]:
# Try it here:
# ## Loading a Library
# Module or Library?
# Modules are python's way of organizing functions, variables, and classes, similar to libraries in other languages. In this section, we will look at:
# * Using existing python modules
# * Building our own modules
# * Finding the things that are within modules
# ### Built in Modules
# Python uses modules to make additional functionality available. Modules can be thought of as libraries with many functions, data types, and other definitions that can be used once loaded.
# We load modules using the import statement:
# * We highly recommend importing using a name (import module as name)
# * Use the name to keep identically named functions from different modules separate
# * You can import just individual functions from a module
# * You can also rename functions.
# In[87]:
# Import all functions using a name
import numpy as np
# We then use the name to refer to functions from this module
print( np.sin( 1./2. * np.pi ) )
# We can also import just some of the functions, as well as change their names
from math import cos as mathCos
print( mathCos( np.pi ) )
# Some common python modules are:
# * numpy
# * matplotlib
# * math
# * scipy
# * pandas
#
# Modules based on their topic can be found: https://wiki.python.org/moin/UsefulModules
# Some modules are already included on the system; you may have to add or update others yourself. Python uses pip for module addition, which handles dependencies. Typically users will put modules in their own space using --user, rather than install them globally. For example, to add cython and to update matplotlib you would run in a cell:
# ```bash
# !pip install cython --user
#
# !pip install matplotlib --user --upgrade
# ```
# We can also use dir to see what is currently available to use:
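# For example, here is a quick sketch (assuming numpy is imported as np, as above) that peeks at the names a module defines:
# ```python
# import numpy as np
# print( dir(np)[:5] )       # the first few names defined in numpy
# print( "sin" in dir(np) )  # True - numpy defines a sin function
# ```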
# ### Your Turn!
# Call the math version of tan() mathTan and print out tangent of pi/2. (Hint, pi can come from math or numpy).
# In[88]:
# Try it here
# Does numpy include functions called log10 and banana?
# In[89]:
# Try it here
# ## Creating a Function
# Functions allow us to do repeated tasks easily by writing the code only once. Functions will have a name, inputs, and outputs and can be called anywhere the task is repeated.
#
# There are functions that are built into python; for example, we have already been using the type() function, which tells us the type of variable we are using. Note that print is also a function!
# In[90]:
aVal = 10.0
print( type( aVal ) )
# Functions have four typical parts:
# * Name - what you call your function
# * Input arguments - what you provide
# * Outputs - what the function gives back
# * Math/Magic - what the function does
# ### Creating Our Own Function
# In python, we use def to define a function, with the function name and inputs followed by a colon. The body of the function is then set apart from the rest of the code by indentation. Some languages use braces rather than indentation.
# ````python
# def functionName( inputs ):
#     # Operate on the inputs
#     outputs = inputs + 5
#     # Return what we want to give back
#     return outputs
# ````
# Let's look at an example function, which changes degrees Fahrenheit to Celsius.
# In[91]:
def changeFromFToC( farVal ):
cVal = (farVal - 32.0) * 5.0 / 9.0
return cVal
# Here, our function name is *changeFromFToC*, the input is *farVal*, the temperature in Fahrenheit, the output is *cVal*, and the temperature in Celsius. We can print or store the output from the function. Note that the function has to be defined before we use it - the cell with the function definition has to have run before we can call the function.
# In[92]:
print( "Change 14 deg F to Celsius" )
print( changeFromFToC( 14 ) )
print( "Change from 68 deg F to Celsius" )
niceTempC = changeFromFToC( 68 )
print( niceTempC )
# Your turn! What is the temperature today? Convert it to Celsius.
#
# For those who have the temperature in Celsius and want to convert it to Fahrenheit: define a new function to do this.
# #### Multiple inputs and outputs
# Here is an example of multiple outputs. We can actually work with the output in a couple of different ways.
# ##### Multiple Output Function
# In[93]:
def changeFromFToCAndK( farVal ):
# Change the temperature from Fahrenheit to Celsius and Kelvin
cVal = (farVal - 32.0) * 5.0 / 9.0
kVal= cVal + 273.15
return cVal, kVal
# ##### Output: Tuple (indexed like a list)
# In[94]:
def changeFromFToCAndK( farVal ):
# Change the temperature from Fahrenheit to Celsius and Kelvin
cVal = (farVal - 32.0) * 5.0 / 9.0
kVal= cVal + 273.15
return cVal, kVal
print( "Change 14 deg F to Celsius and Kelvin" )
print( changeFromFToCAndK( 14 ) )
print( "Change 32 deg F to Celsius and Kelvin" )
freezing = changeFromFToCAndK( 32 )
print( freezing[0] )
print( freezing[1] )
# ##### Output: Multiple Variables
# In[95]:
print( "Change 212 deg F to Celsius and Kelvin" )
boilingC, boilingK = changeFromFToCAndK( 212 )
print( boilingC )
print( boilingK )
# ##### Multiple Input Function
# In[96]:
def changeFromFToCOrK( farVal, tempType ):
if (tempType == "C"):
return (farVal - 32.0) * 5.0 / 9.0
elif (tempType == "K"):
return ((farVal - 32.0) * 5.0 / 9.0) + 273.15
else:
return "invalid temperature type"
# In[97]:
print ( changeFromFToCOrK(70,"C") )
# In[98]:
print ( changeFromFToCOrK(70,"K") )
# In[99]:
print ( changeFromFToCOrK(70,"W") )
# #### Function Gotcha! 😆
# ```{note}
# The biggest gotcha on functions is with variable scope:
# * Variables defined in a function are not accessible from the outside
# * Functions have access to more than just the variables passed in
# ```
# In[100]:
def addAnAnimal( animal ):
print ("\t","in the function")
print ("\t","I have access to dog:",dog)
print ("\t","I have access to animal:",animal)
newValue = animal + 1
print ("\t","I have access to newValue:",newValue)
return newValue
print ("outside the function")
dog = 10
print("dog:", dog)
print ("function output:",addAnAnimal( dog ))
# If we were to add:
#
# ```python
# print (newValue)
# ```
#
# to the bottom, we would end up with this:
# ```python
# def addAnAnimal( animal ):
# print ("\t","in the function")
# print ("\t","I have access to dog:",dog)
# print ("\t","I have access to animal:",animal)
# newValue = animal + 1
# print ("\t","I have access to newValue:",newValue)
# return newValue
#
# print ("outside the function")
# dog = 10
# print("dog:", dog)
# print ("function output:",addAnAnimal( dog ))
# print (newValue)
# ```
#
# The output would be:
#
# ```
# outside the function
# dog: 10
#      in the function
#      I have access to dog: 10
#      I have access to animal: 10
#      I have access to newValue: 11
# function output: 11
# ```
#
# ```python
# ---------------------------------------------------------------------------
# NameError Traceback (most recent call last)
# <ipython-input-32-07cce689eb00> in <module>
# 11 print("dog:", dog)
# 12 print ("function output:",addAnAnimal( dog ))
# ---> 13 print (newValue)
#
# NameError: name 'newValue' is not defined
# ```
#
# ### Your Turn!
# Try to fill in code to fulfill the request! Here is a variable used in the exercises.
# In[101]:
aListOfNumbers = [6, 3, 4, 5, 7, 8, 9 ]
# Write a function that returns the length of aListOfNumbers as well as the maximum value. Hint: max() is a built-in function
# In[102]:
# Try it here:
|
nilq/baby-python
|
python
|
import requests, subprocess, time
import OpenSSL, M2Crypto, ssl, socket
import iptools
import random
from termcolor import colored, cprint
# from multiprocessing import Process, Queue, Lock, Pool ---> is not stable with tqdm lib
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool # Used for tqdm instead of pool
class Certcrawler:
def __init__(self, ipAddrList, keywordList, outputFile, region):
socket.setdefaulttimeout(1)
self.allipAddrList = ipAddrList
self.keywordList = keywordList
self.resList = []
self.tryipList = []
self.ipExtractResult = []
self.totalRes = []
self.outputFile = outputFile
self.region = region
cprint ("[+] Start Cloudium certfication scanner ", 'green')
def ipExtract(self, ipClass):
# Extract specific ip addrs from IP Class
self.IPV4 = ipClass
self.tryipList = iptools.IpRange(self.IPV4)
return self.tryipList
def shuffleList(self):
# Shuffle Target IP Lists for avoiding abusing from providers
self.shuffledIPList = random.sample(self.allipAddrList, len(self.allipAddrList))
return self.shuffledIPList
def certScanner (self) :
p = Pool(nodes = 512)
cprint ("[+] Keywords : " + " ".join(str(x) for x in self.keywordList), 'green')
# self.allipAddrList = self.shuffleList()
self.allipAddrList = [x for x in self.shuffleList() if self.region in x ]
for self.tryipClass in self.allipAddrList:
self.ipExtractResult = self.ipExtract(self.tryipClass.split("@")[0])
_max = len(self.ipExtractResult)
cprint ("[+] Scanning IP Addr Class : " + self.tryipClass + "\t-- Number of scan target is :" + str(len(self.ipExtractResult)), 'green')
with tqdm(total=_max) as pbar:
pbar.set_description("[+] Progressing : %s " %self.tryipClass)
for i, domain in tqdm(enumerate(p.imap(self.certChecker, self.ipExtractResult))):
pbar.update()
if domain is not None:
self.resList.append(domain)
pbar.close()
p.terminate() # Like p.close()
p.restart() # Like p.join()
if self.resList:
self.printRes()
else:
cprint ("[!] No kewords found on this IP class \n", 'red')
time.sleep(1)
self.ipExtractResult = []
self.resList = []
def certChecker(self, tryip):
try:
cert = ssl.get_server_certificate((tryip, 443))
x509 = M2Crypto.X509.load_cert_string(cert)
cnDomain = x509.get_subject().as_text().split("CN=")[1]
            for x in self.keywordList:
                if x in cnDomain:
                    return cnDomain
except:
pass
def printRes (self) :
# Delete duplicated data
self.resSet = set(self.resList)
self.totalRes.extend(self.resSet)
cprint ("[+] Number of result is : " + str(len(self.resSet)), 'yellow')
for x in self.resSet:
print (x)
def returnRes (self):
return self.totalRes
    def fileWriter (self):
        # Write the de-duplicated results to the output file and close it
        with open(self.outputFile, "w+") as f:
            for x in self.totalRes:
                f.write(x + "\n")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import flow
if __name__ == '__main__':
flow.initialize()
flow.app.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/python2.7
import libiopc_rest as rst
def func_add_img(hostname, options):
payload = '{'
payload += '"ops":"add_qemu_img",'
payload += '"format":"qcow2",'
payload += '"disk_path":"/hdd/data/99_Misc/VMs/sys005.qcow2",'
payload += '"size":30,'
#payload += '"disk_path":"/hdd/data/00_Daily/Data002.qcow2",'
#payload += '"size":200,'
payload += '"size_unit":"G",'
payload += '}'
return rst.http_post_ops_by_pyaload(hostname, payload)
def _start_qemu(idx):
payload = '{'
payload += '"ops":"start_qemu",'
payload += '"qemu_index":%d' % idx
payload += '}'
return payload
def _gencfg_qemu(idx):
payload = '{'
payload += '"ops":"gen_cfg_qemu",'
payload += '"qemu_index":%d' % idx
payload += '}'
return payload
def func_gen_cfg1(hostname, options):
payload = _gencfg_qemu(0)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu1(hostname, options):
payload = _start_qemu(0)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg2(hostname, options):
payload = _gencfg_qemu(1)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu2(hostname, options):
payload = _start_qemu(1)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg3(hostname, options):
payload = _gencfg_qemu(2)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu3(hostname, options):
payload = _start_qemu(2)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg4(hostname, options):
payload = _gencfg_qemu(3)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu4(hostname, options):
payload = _start_qemu(3)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_gen_cfg5(hostname, options):
payload = _gencfg_qemu(4)
return rst.http_post_ops_by_pyaload(hostname, payload)
def func_start_qemu5(hostname, options):
payload = _start_qemu(4)
return rst.http_post_ops_by_pyaload(hostname, payload)
|
nilq/baby-python
|
python
|
from typing import Any, Dict, Optional
import numpy as np
from GPyOpt.optimization.acquisition_optimizer import ContextManager as GPyOptContextManager
from .. import ParameterSpace
Context = Dict[str, Any]
class ContextManager:
"""
Handles the context variables in the optimizer
"""
def __init__(self, space: ParameterSpace,
context: Context,
gpyopt_space: Optional[Dict[str, Any]] = None):
"""
:param space: Parameter space of the search problem.
        :param context: Dictionary of variables and their context values.
                        These values are fixed during optimization.
:param gpyopt_space: Same as space but in GPyOpt format.
"""
self.space = space
if gpyopt_space is None:
gpyopt_space = space.convert_to_gpyopt_design_space()
self._gpyopt_context_manager = GPyOptContextManager(gpyopt_space, context)
self.contextfree_space = ParameterSpace(
[param for param in self.space.parameters if param.name not in context])
self.context_space = ParameterSpace(
[param for param in self.space.parameters if param.name in context])
def expand_vector(self, x: np.ndarray) -> np.ndarray:
"""
Expand contextfree parameter vector by values of the context.
:param x: Contextfree parameter values as 2d-array
:return: Parameter values with inserted context values
"""
if len(self.context_space.parameters) == 0:
return x
else:
return self._gpyopt_context_manager._expand_vector(x)
|
nilq/baby-python
|
python
|
import datetime
anon = int(input('What year were you born? '))
anoa = datetime.date.today().year
idade = anoa - anon
if idade < 16:
    print('You do not need to enlist in the army yet; you are {} years old'.format(idade))
elif idade == 16 or idade == 17:
    print('You may now enlist in the army; you are {} years old'.format(idade))
elif idade == 18:
    print('You must enlist in the army; you are {} years old'.format(idade))
else:
    print('Your deadline to enlist in the army has passed; you will have to pay a fine')
|
nilq/baby-python
|
python
|
# https://adventofcode.com/2017/day/3
__author__ = 'Remus Knowles <remknowles@gmail.com>'
def which_layer(integer):
"""
Work out which layer an integer is in.
"""
c = 1
while ((2*c - 1)*(2*c - 1)) <= integer:
c += 1
return c
def layer_rows(layer):
"""
Given a layer return each row as a list.
"""
els = range((2*(layer-1)-1)*(2*(layer-1)-1) + 1, (2*layer-1)*(2*layer-1) + 1)
    side_length = len(els) // 4
return [els[:side_length], els[side_length:2*side_length], els[2*side_length:3*side_length], els[3*side_length:]]
def dist(integer):
"""
Return the distance from center.
"""
if integer == 1:
return 0
c = which_layer(integer)
rows = layer_rows(c)
l = len(rows[0])
    mid = (l // 2) - 1
for r in rows:
if integer in r:
list_pos = r.index(integer)
return c + abs(mid - list_pos) - 1
def main():
    print(dist(277678))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
Turing Machine simulator driver
"""
from __future__ import print_function
import json
from turing_machine.Machine import Machine
def _main(machine_filename, tape):
"""
Runs the turing machine simulator
"""
with open(machine_filename) as json_file:
json_data = json.load(json_file)
tmachine = Machine(json_data)
tmachine.set_tape(list(tape))
if tmachine.run() is True:
print("This was a valid TM")
if __name__ == "__main__":
_main('tm_data/tm_01.json', "010#010")
print("-----")
_main('tm_data/tm_02.json', "00xx00")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
PKG = "pr2_mechanism_controllers"
import roslib; roslib.load_manifest(PKG)
import sys
import os
import string
import rospy
from std_msgs import *
from pr2_msgs.msg import PeriodicCmd
from time import sleep
def print_usage(exit_code = 0):
    print('''Usage:
send_periodic_cmd.py [controller] [profile] [period] [amplitude] [offset]
- [profile] - Possible options are linear or linear_blended
- [period] - Time for one entire cycle to execute (in seconds)
- [amplitude] - Distance max value to min value of profile (In radians for laser_tilt controller)
- [offset] - Constant cmd to add to profile (offset=0 results in profile centered around 0)
''')
sys.exit(exit_code)
if __name__ == '__main__':
rospy.init_node('periodic_cmd_commander', sys.argv, anonymous=True)
if len(sys.argv) != 6:
print_usage()
cmd = PeriodicCmd()
controller = sys.argv[1]
cmd.header = rospy.Header(None, None, None)
cmd.profile = sys.argv[2]
cmd.period = float (sys.argv[3])
cmd.amplitude = float (sys.argv[4])
cmd.offset = float (sys.argv[5])
    print('Sending Command to %s: ' % controller)
    print('  Profile Type: %s' % cmd.profile)
    print('  Period:       %f Seconds' % cmd.period)
    print('  Amplitude:    %f Radians' % cmd.amplitude)
    print('  Offset:       %f Radians' % cmd.offset)
command_publisher = rospy.Publisher(controller + '/set_periodic_cmd', PeriodicCmd)
sleep(1)
command_publisher.publish( cmd )
sleep(1)
    print('Command sent!')
|
nilq/baby-python
|
python
|
from unittest import TestCase
from pyrrd.node import RRDXMLNode
from pyrrd.testing import dump
from pyrrd.util import XML
class RRDXMLNodeTestCase(TestCase):
def setUp(self):
self.tree = XML(dump.simpleDump01)
def test_creation(self):
rrd = RRDXMLNode(self.tree)
self.assertEqual(rrd.getAttribute("version"), "0003")
self.assertEqual(rrd.getAttribute("step"), "300")
self.assertEqual(rrd.getAttribute("lastupdate"), "920804400")
def test_creationDS(self):
dsChecks = [
("name", "speed"),
("type", "COUNTER"),
("minimal_heartbeat", "600"),
("min", "NaN"),
("max", "NaN"),
("last_ds", "UNKN"),
("value", "0.0000000000e+00"),
("unknown_sec", "0")]
rrd = RRDXMLNode(self.tree)
self.assertEqual(len(rrd.ds), 1)
ds = rrd.ds[0]
for name, value in dsChecks:
self.assertEqual(ds.getAttribute(name), value)
def test_creationRRA(self):
rra1Checks = [
("cf", "AVERAGE"),
("pdp_per_row", "1")]
rra2Checks = [
("cf", "AVERAGE"),
("pdp_per_row", "6")]
rrd = RRDXMLNode(self.tree)
self.assertEqual(len(rrd.rra), 2)
rra1 = rrd.rra[0]
for name, value in rra1Checks:
self.assertEqual(rra1.getAttribute(name), value)
rra2 = rrd.rra[1]
for name, value in rra2Checks:
self.assertEqual(rra2.getAttribute(name), value)
def test_creationRRAParams(self):
rrd = RRDXMLNode(self.tree)
self.assertEqual(len(rrd.rra), 2)
rra1 = rrd.rra[0]
self.assertEqual(rra1.getAttribute("xff"), "5.0000000000e-01")
rra2 = rrd.rra[1]
self.assertEqual(rra2.getAttribute("xff"), "5.0000000000e-01")
def test_creationRRACDPPrep(self):
dsChecks = [
("primary_value", "0.0000000000e+00"),
("secondary_value", "0.0000000000e+00"),
("value", "NaN"),
("unknown_datapoints", "0")]
rrd = RRDXMLNode(self.tree)
cdpPrep1 = rrd.rra[0].cdp_prep
self.assertEqual(len(cdpPrep1.ds), 1)
for name, value in dsChecks:
self.assertEqual(cdpPrep1.ds[0].getAttribute(name), value)
cdpPrep2 = rrd.rra[1].cdp_prep
self.assertEqual(len(cdpPrep2.ds), 1)
for name, value in dsChecks:
self.assertEqual(cdpPrep2.ds[0].getAttribute(name), value)
def test_creationIncludeData(self):
rrd = RRDXMLNode(self.tree, includeData=True)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import logging
from sentry.tasks.base import instrumented_task
from sentry.utils.locking import UnableToAcquireLock
logger = logging.getLogger(__name__)
@instrumented_task(
name='sentry.tasks.process_buffer.process_pending',
queue='buffers.process_pending',
)
def process_pending(partition=None):
"""
Process pending buffers.
"""
from sentry import buffer
from sentry.app import locks
if partition is None:
lock_key = 'buffer:process_pending'
else:
lock_key = 'buffer:process_pending:%d' % partition
lock = locks.get(lock_key, duration=60)
try:
with lock.acquire():
buffer.process_pending(partition=partition)
except UnableToAcquireLock as error:
logger.warning('process_pending.fail', extra={'error': error, 'partition': partition})
@instrumented_task(name='sentry.tasks.process_buffer.process_incr')
def process_incr(**kwargs):
"""
Processes a buffer event.
"""
from sentry import buffer
buffer.process(**kwargs)
|
nilq/baby-python
|
python
|
# Webcam.py
# author: Matthew P. Burruss
# last update: 8/14/2018
# Description: interface for webcam for the various modes
import numpy as np
import cv2
from datetime import datetime
import csv
import socket
import sys
import time
liveStreamServerAddress = ('10.66.229.241',5003)
# release()
# Summary: Cleans up camera.
# Parameter: cap => USB camera object
def release(cap):
print('Releasing')
cap.release()
# configureCamera()
# Summary: Configures camera to take images at a designated width, height, and FPS.
# Parameter: width  => frame width in pixels
#            height => frame height in pixels
#            fps    => frames per second
def configureCamera(width,height,fps):
    cap = cv2.VideoCapture(-1)
    cap.set(3,width)   # 3 = CAP_PROP_FRAME_WIDTH
    cap.set(4,height)  # 4 = CAP_PROP_FRAME_HEIGHT
    cap.set(5,fps)     # 5 = CAP_PROP_FPS
    cap.set(16,1)      # 16 = CAP_PROP_CONVERT_RGB
    return cap
# There are three modes for the camera thread: mode 1 = data collection camera mode
# mode 2 = autonomous driving camera mode (in Server.py)
# mode 3 = live stream driving camera mode
# MODE 1
# dataCollectionCamera()
# Initializes webcam and stores footage on external USB
# Timestamps/Labels are stored on Rpi
# Resolution: 320x240, FPS: 30
# Parameter: stop_event => event listening for termination of camera
# newpath => USB path to write camera images.
def dataCollectionCamera(stop_event,newpath):
csvfile=open("ImageCount.csv", "w")
cap = configureCamera(320,240,30)
ret=True
count = 0
value1=[]
value2=[]
images = []
# while the user has not signalled to stop camera, take footage and store on external drive
while ret and not stop_event.is_set() and count < 3000:
ret, frame = cap.read()
Imagetimestamp=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
images.append(frame)
#cv2.imwrite("%s/frame%d.jpg" % (newpath,count),frame)
value1.append(Imagetimestamp)
value2.append(count)
count += 1
print(count)
release(cap)
for i in range(len(images)):
cv2.imwrite("%s/frame%d.jpg"%(newpath,i),images[i])
writer=csv.writer(csvfile)
writer.writerow(value1)
writer.writerow(value2)
csvfile.close()
# MODE 3
# liveStreamCamera()
# creates socket connection over PORT 5002 and sends over camera footage in real time.
# Parameter: stop_event => event listening for termination of camera
def liveStreamCamera(stop_event):
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM,0)
sock2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock2.bind(liveStreamServerAddress)
sock2.listen(1)
cap = configureCamera(320,240,30)
ret = True
connection,client_address = sock2.accept()
while ret and not stop_event.is_set():
# save frame as JPEG file
ret, frame = cap.read()
#frame = frame[0:239,0:319]
frame = cv2.resize(frame,(200,66))
data = cv2.imencode('.jpg', frame)[1].tostring()
size = str(sys.getsizeof(data))
connection.sendall(size.encode())
connection.recv(16)
connection.sendall(data)
connection.recv(10)
release(cap)
sock2.close()
|
nilq/baby-python
|
python
|
from pathlib import Path
from fastapi import FastAPI, APIRouter, Request, Depends
from api.api_v1.api import api_router
from core.config import settings
BASE_PATH = Path(__file__).resolve().parent
root_router = APIRouter()
app = FastAPI(title="OCR API", openapi_url="/openapi.json")
@root_router.get("/", status_code=200)
def root(request: Request) -> dict:
"""
Root GET
"""
return {"API": "img2ocrpdf-next", "Version": settings.API_V1_STR}
app.include_router(api_router, prefix=settings.API_V1_STR) # <----- API versioning
app.include_router(root_router)
#if __name__ == "__main__":
# Use this for debugging purposes only
#import uvicorn
#uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")
|
nilq/baby-python
|
python
|