# --- id: 185803 ---
from zschema.leaves import *
from zschema.compounds import *
import zschema.registry
from ztag.annotation import Annotation
import zcrypto_schemas.zcrypto as zcrypto
import zgrab2_schemas.zgrab2 as zgrab2
import zgrab2_schemas.zgrab2.mssql as zgrab2_mssql
import zgrab2_schemas.zgrab2.oracle as zgrab2_oracle
import zgrab2_schemas.zgrab2.ssh as zgrab2_ssh
__local_metadata = {}
for key in Annotation.LOCAL_METADATA_KEYS:
__local_metadata[key] = WhitespaceAnalyzedString()
local_metadata = SubRecord(__local_metadata)
ztag_dh_export = SubRecord({
"dh_params": zcrypto.DHParams(doc="The parameters for the key."),
"support": Boolean(),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_dh = SubRecord({
"dh_params": zcrypto.DHParams(doc="The parameters for the key."),
"support": Boolean(),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_rsa_export = SubRecord({
"rsa_params":zcrypto.RSAPublicKey(),
"support":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_ecdh = SubRecord({
"ecdh_params":zcrypto.ECDHParams(),
"support":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
zgrab_certificate_trust = SubRecord({
"type":Enum(doc="root, intermediate, or leaf certificate"),
"trusted_path":Boolean(doc="Does certificate chain up to browser root store"),
"valid":Boolean(doc="is this certificate currently valid in this browser"),
"was_valid":Boolean(doc="was this certificate ever valid in this browser")
})
_zcrypto_parsed_cert = zcrypto.ParsedCertificate()
zgrab_certificate = SubRecord({
"parsed": SubRecord({
"__expanded_names": ListOf(String()),
}, extends=_zcrypto_parsed_cert),
"validation":SubRecord({
"nss":zgrab_certificate_trust.new(category="NSS (Firefox) Validation"),
"apple":zgrab_certificate_trust.new(category="Apple Validation"),
"microsoft":zgrab_certificate_trust.new(category="Microsoft Validation"),
"android":zgrab_certificate_trust,
"java":zgrab_certificate_trust,
}),
})
zgrab_server_certificate_valid = SubRecord({
"complete_chain":Boolean(doc="does server provide a chain up to a root"),
"valid":Boolean(doc="is this certificate currently valid in this browser"),
"error":WhitespaceAnalyzedString()
})
ztag_tls_type = SubRecordType({
# This is server_hello.version.name
"version": zcrypto.TLSVersionName(),
# cipher_suite = { id: server_hello.cipher_suite.hex, name: server_hello.cipher_suite.name }
"cipher_suite": SubRecord({
"id": String(doc="The hexadecimal string representation of the numeric cipher algorithm identifier."),
"name": WhitespaceAnalyzedString(
doc="The algorithm identifier for the cipher algorithm identifier, see e.g. https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml.",
examples=["unknown", "TLS_RSA_WITH_RC4_128_MD5", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256"],
),
}),
# server_hello.ocsp_stapling
"ocsp_stapling": Boolean(),
# server_hello.secure_renegotiation
"secure_renegotiation": Boolean(),
# certificate.parsed = server_certificates.certificate.parsed
"certificate": zgrab_certificate,
# chain.parsed = [ elt.parsed for elt in server_certificates.chain ]
"chain": ListOf(zgrab_certificate),
# server_hello.scts
"scts": ListOf(zcrypto.SCTRecord()),
    # session_ticket = { key: session_ticket[key] for key in ("length", "lifetime_hint") }
"session_ticket": zcrypto.SessionTicket(),
    # validation = { key: server_certificates.validation[key] for key in ("browser_trusted", "browser_error", "matches_domain") }
"validation": zcrypto.TLSCertificateValidation(),
    # server_key_exchange = { key: server_key_exchange[key] for key in ("ecdh_params", "dh_params", "rsa_params") }
"server_key_exchange": zcrypto.ServerKeyExchange(),
# signature = ...
"signature": SubRecord({
# ... = signature.valid
"valid": Boolean(),
# ... = signature.signature_error
"signature_error": WhitespaceAnalyzedString(),
# ... = signature.signature_and_hash_type.signature_algorithm
"signature_algorithm": String(),
# ... = signature.signature_and_hash_type.hash_algorithm
"hash_algorithm": String(),
}),
"metadata": local_metadata,
"timestamp": Timestamp(),
})
ztag_tls = ztag_tls_type()
ztag_heartbleed = SubRecord({
"heartbeat_enabled":Boolean(),
"heartbleed_vulnerable":Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_smtp_starttls = SubRecord({
"banner": WhitespaceAnalyzedString(),
"ehlo": WhitespaceAnalyzedString(),
"starttls": WhitespaceAnalyzedString(),
"tls": ztag_tls,
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_mail_starttls = SubRecord({
"banner": WhitespaceAnalyzedString(),
"starttls": WhitespaceAnalyzedString(),
"tls": ztag_tls,
"metadata": local_metadata,
"timestamp":Timestamp(),
})
ztag_mail_tls = SubRecord({
"tls":ztag_tls,
"banner": WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
zgrab_unknown_http_header = SubRecord({
"key":String(),
"value":WhitespaceAnalyzedString()
})
zgrab_http_headers = SubRecord({
"access_control_allow_origin":WhitespaceAnalyzedString(),
"accept_patch":WhitespaceAnalyzedString(),
"accept_ranges":WhitespaceAnalyzedString(),
"age":WhitespaceAnalyzedString(),
"allow":WhitespaceAnalyzedString(),
"alt_svc":WhitespaceAnalyzedString(),
"alternate_protocol":WhitespaceAnalyzedString(),
"cache_control":WhitespaceAnalyzedString(),
"connection":WhitespaceAnalyzedString(),
"content_disposition":WhitespaceAnalyzedString(),
"content_encoding":WhitespaceAnalyzedString(),
"content_language":WhitespaceAnalyzedString(),
"content_length":WhitespaceAnalyzedString(),
"content_location":WhitespaceAnalyzedString(),
"content_md5":WhitespaceAnalyzedString(),
"content_range":WhitespaceAnalyzedString(),
"content_type":WhitespaceAnalyzedString(),
"date":WhitespaceAnalyzedString(),
"etag":WhitespaceAnalyzedString(),
"expires":WhitespaceAnalyzedString(),
"last_modified":WhitespaceAnalyzedString(),
"link":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"p3p":WhitespaceAnalyzedString(),
"pragma":WhitespaceAnalyzedString(),
"proxy_authenticate":WhitespaceAnalyzedString(),
"public_key_pins":WhitespaceAnalyzedString(),
"refresh":WhitespaceAnalyzedString(),
"referer":WhitespaceAnalyzedString(),
"retry_after":WhitespaceAnalyzedString(),
"server":WhitespaceAnalyzedString(),
"set_cookie":WhitespaceAnalyzedString(),
"status":WhitespaceAnalyzedString(),
"strict_transport_security":WhitespaceAnalyzedString(),
"trailer":WhitespaceAnalyzedString(),
"transfer_encoding":WhitespaceAnalyzedString(),
"upgrade":WhitespaceAnalyzedString(),
"vary":WhitespaceAnalyzedString(),
"via":WhitespaceAnalyzedString(),
"warning":WhitespaceAnalyzedString(),
"www_authenticate":WhitespaceAnalyzedString(),
"x_frame_options":WhitespaceAnalyzedString(),
"x_xss_protection":WhitespaceAnalyzedString(),
"content_security_policy":WhitespaceAnalyzedString(),
"x_content_security_policy":WhitespaceAnalyzedString(),
"x_webkit_csp":WhitespaceAnalyzedString(),
"x_content_type_options":WhitespaceAnalyzedString(),
"x_powered_by":WhitespaceAnalyzedString(),
"x_ua_compatible":WhitespaceAnalyzedString(),
"x_content_duration":WhitespaceAnalyzedString(),
"x_forwarded_for":WhitespaceAnalyzedString(),
"x_real_ip":WhitespaceAnalyzedString(),
"proxy_agent":WhitespaceAnalyzedString(),
"unknown":ListOf(zgrab_unknown_http_header)
})
ztag_http = SubRecord({
"status_code":Unsigned16BitInteger(),
"status_line":WhitespaceAnalyzedString(),
"body":HTML(),
"headers":zgrab_http_headers,
"body_sha256":HexString(validation_policy="warn"),
"title":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
golang_crypto_param = SubRecord({
"value":IndexedBinary(),
"length":Unsigned32BitInteger()
})
#ztag_open_proxy = SubRecord({
# "connect":SubRecord({
# "status_code":Integer(),
# "status_line":WhitespaceAnalyzedString(),
# "body":WhitespaceAnalyzedString(),
# "headers":zgrab_http_headers
# }),
# "get":SubRecord({
# "status_code":Integer(),
# "status_line":WhitespaceAnalyzedString(),
# "body":WhitespaceAnalyzedString(),
# "headers":zgrab_http_headers,
# "random_present":Boolean(),
# "body_sha256":HexString()
# }),
# "metadata":local_metadata
#})
# 2018/09/07: Workaround for mis-typed CertType.id field in ES; actual type is uint32, current ES
# type is keyword (string).
ssh_certkey_public_key_type = zgrab2_ssh.CertType(exclude={"bigquery"})
ssh_certkey_public_key_type["id"].set("exclude",
ssh_certkey_public_key_type["id"].exclude |
{"elasticsearch"})
ztag_ssh_v2 = SubRecord({
"metadata": local_metadata,
"timestamp": Timestamp(),
"banner": zgrab2_ssh.AnalyzedEndpointID(),
# This is a massaged version of zgrab2_ssh.KexInitMessage
"support": SubRecord({
"kex_algorithms": zgrab2_ssh.KexAlgorithms(),
"host_key_algorithms": zgrab2_ssh.KeyAlgorithms(),
"first_kex_follows": Boolean(),
"client_to_server": SubRecord({
"ciphers": zgrab2_ssh.CipherAlgorithms(),
"macs": zgrab2_ssh.MACAlgorithms(),
"compressions": zgrab2_ssh.CompressionAlgorithms(),
"languages": zgrab2_ssh.LanguageTags(),
}),
"server_to_client":SubRecord({
"ciphers": zgrab2_ssh.CipherAlgorithms(),
"macs": zgrab2_ssh.MACAlgorithms(),
"compressions": zgrab2_ssh.CompressionAlgorithms(),
"languages": zgrab2_ssh.LanguageTags(),
}),
}),
# This is a massaged version of zgrab2_ssh.AlgorithmSelection
"selected": SubRecord({
"kex_algorithm": zgrab2_ssh.KexAlgorithm(),
"host_key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"client_to_server": zgrab2_ssh.DirectionAlgorithms(),
"server_to_client": zgrab2_ssh.DirectionAlgorithms(),
}),
"key_exchange": zgrab2_ssh.KeyExchange(),
# This is a massaged version of zgrab2_ssh.SSHPublicKeyCert
"server_host_key": SubRecord({
"key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"fingerprint_sha256": HexString(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": zgrab2_ssh.ED25519PublicKey(),
"certkey_public_key": SubRecord({
# "nonce" is an IndexedBinary here, not a Binary()
"nonce": IndexedBinary(),
# This is an SSHPublicKey ("algorithm", not "key_algorithm")
"key": zgrab2_ssh.SSHPublicKey(),
"serial": String(),
# "cert_type" is renamed to "type"
"type": ssh_certkey_public_key_type,
"key_id": String(),
"valid_principals": ListOf(String()),
"validity": SubRecord({
# These are DateTimes in SSHPublicKeyCert
"valid_after": Timestamp(doc="Timestamp of when certificate is first valid. Timezone is UTC."),
"valid_before": Timestamp(doc="Timestamp of when certificate expires. Timezone is UTC."),
"length": Signed64BitInteger(),
}),
"reserved": Binary(),
"signature_key": SubRecord({
"key_algorithm": zgrab2_ssh.KeyAlgorithm(),
"fingerprint_sha256": HexString(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": zgrab2_ssh.ED25519PublicKey(),
}),
"signature": SubRecord({
"signature_algorithm": SubRecord({
"name": zgrab2_ssh.KeyAlgorithm(),
}),
"value": Binary(),
}),
"parse_error": String(),
# Flattens known/unknown
"extensions":SubRecord({
"permit_X11_forwarding": Boolean(),
"permit_agent_forwarding": Boolean(),
"permit_port_forwarding": Boolean(),
"permit_pty": Boolean(),
"permit_user_rc": Boolean(),
"unknown": ListOf(String()),
}),
# Flattens known/unknown
"critical_options": SubRecord({
"force_command": Boolean(),
"source_address": Boolean(),
"unknown": ListOf(String()),
}),
}),
}),
})
ztag_ftp = SubRecord({
"banner":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
telnet_caps_list = ListOf(SubRecord({
"name":String(),
"value":Unsigned32BitInteger()
}))
ztag_telnet = SubRecord({
"support":Boolean(),
"banner":WhitespaceAnalyzedString(),
"will":telnet_caps_list,
"wont":telnet_caps_list,
"do":telnet_caps_list,
"dont":telnet_caps_list,
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_modbus = SubRecord({
"support":Boolean(),
"function_code":Unsigned16BitInteger(),
"mei_response":SubRecord({
"conformity_level":Signed32BitInteger(),
"objects":SubRecord({
"vendor":WhitespaceAnalyzedString(),
"product_code":WhitespaceAnalyzedString(),
"revision":WhitespaceAnalyzedString(),
"vendor_url":URL(),
"product_name":WhitespaceAnalyzedString(),
"model_name":WhitespaceAnalyzedString(),
"user_application_name":WhitespaceAnalyzedString(),
})
}),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_bacnet = SubRecord({
"support":Boolean(),
"instance_number": Signed32BitInteger(),
"vendor": SubRecord({
"id": Signed32BitInteger(),
"reported_name":WhitespaceAnalyzedString(),
"official_name":WhitespaceAnalyzedString(),
}),
"firmware_revision": String(),
"application_software_revision":String(),
"object_name":WhitespaceAnalyzedString(),
"model_name":WhitespaceAnalyzedString(),
"description":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_dns_question = SubRecord({
"name":String(),
"type":String()
})
ztag_dns_answer = SubRecord({
"name":String(),
"response":WhitespaceAnalyzedString(),
"type":String()
})
ztag_dns_lookup = SubRecord({
"support":Boolean(),
"errors":Boolean(),
"open_resolver":Boolean(),
"resolves_correctly":Boolean(),
"answers":ListOf(ztag_dns_answer),
"authorities":ListOf(ztag_dns_answer),
"additionals":ListOf(ztag_dns_answer),
"questions":ListOf(ztag_dns_question),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_tls_support = SubRecord({
"support": Boolean(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_fox = SubRecord({
"support":Boolean(),
"version":WhitespaceAnalyzedString(),
"id":Signed32BitInteger(),
"hostname":WhitespaceAnalyzedString(),
"host_address":WhitespaceAnalyzedString(),
"app_name":WhitespaceAnalyzedString(),
"app_version":WhitespaceAnalyzedString(),
"vm_name":WhitespaceAnalyzedString(),
"vm_version":WhitespaceAnalyzedString(),
"os_name":WhitespaceAnalyzedString(),
"os_version":WhitespaceAnalyzedString(),
"station_name":WhitespaceAnalyzedString(),
"language":WhitespaceAnalyzedString(),
"time_zone":WhitespaceAnalyzedString(),
"host_id":WhitespaceAnalyzedString(),
"vm_uuid":WhitespaceAnalyzedString(),
"brand_id":WhitespaceAnalyzedString(),
"sys_info":WhitespaceAnalyzedString(),
"auth_agent_type":String(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_dnp3 = SubRecord({
"support":Boolean(),
"raw_response":Binary(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_s7 = SubRecord({
"support":Boolean(),
"system":WhitespaceAnalyzedString(),
"module":WhitespaceAnalyzedString(),
"plant_id":WhitespaceAnalyzedString(),
"copyright":WhitespaceAnalyzedString(),
"serial_number":WhitespaceAnalyzedString(),
"reserved_for_os":WhitespaceAnalyzedString(),
"module_type":WhitespaceAnalyzedString(),
"memory_serial_number":WhitespaceAnalyzedString(),
"cpu_profile":WhitespaceAnalyzedString(),
"oem_id":WhitespaceAnalyzedString(),
"location":WhitespaceAnalyzedString(),
"module_id":WhitespaceAnalyzedString(),
"hardware":WhitespaceAnalyzedString(),
"firmware":WhitespaceAnalyzedString(),
"metadata":local_metadata,
"timestamp":Timestamp(),
})
ztag_smb = SubRecord({
"smbv1_support":Boolean(),
"metadata":local_metadata,
})
ztag_upnp_discovery = SubRecord({
"usn": WhitespaceAnalyzedString(),
"agent": WhitespaceAnalyzedString(),
"st": WhitespaceAnalyzedString(),
"ext": WhitespaceAnalyzedString(),
"location": WhitespaceAnalyzedString(),
"server": WhitespaceAnalyzedString(),
"cache_control": WhitespaceAnalyzedString(),
"x_user_agent": WhitespaceAnalyzedString(),
"metadata": local_metadata,
})
# Add the common zgrab2 fields to the results schema which are added by
# ZGrab2Transform._transform_object().
def ztag_zgrab2_transformed(service, results):
results["supported"] = Boolean(doc="If true, %s was detected on this machine." % service)
results["metadata"] = local_metadata
return results
# The oracle ztag transform is a plain copy of the "handshake" field.
ztag_oracle = ztag_zgrab2_transformed(service="Oracle", results=zgrab2_oracle.oracle_scan_response["result"]["handshake"])
ztag_oracle["tls"] = ztag_tls_type(doc="The TLS handshake with the server (if applicable).")
ztag_mssql = ztag_zgrab2_transformed(service="MSSQL", results=SubRecord({
"version": WhitespaceAnalyzedString(doc="The MSSQL version returned by the server in "
"the PRELOGIN response. Its format is "
"'MAJOR.MINOR.BUILD_NUMBER'."),
"instance_name": WhitespaceAnalyzedString(doc="The value of the INSTANCE field "
"returned by the server in the PRELOGIN "
"response."),
"encrypt_mode": Enum(values=zgrab2_mssql.ENCRYPT_MODES,
doc="The negotiated encryption mode for the session. "
"See https://msdn.microsoft.com/en-us/library/dd357559.aspx "
"for details."),
"tls": ztag_tls_type(doc="The TLS handshake with the server (for "
"non-encrypted connections, this used only "
"for the authentication phase).")
}))
ztag_mysql = ztag_zgrab2_transformed(service="MySQL", results=SubRecord({
"protocol_version": zgrab2.mysql.mysql_scan_response["result"]["protocol_version"],
"server_version": zgrab2.mysql.mysql_scan_response["result"]["server_version"],
"capability_flags": zgrab2.mysql.mysql_capability_flags,
"status_flags": zgrab2.mysql.mysql_server_status_flags,
"error_code": zgrab2.mysql.mysql_scan_response["result"]["error_code"],
"error_id": zgrab2.mysql.mysql_scan_response["result"]["error_id"],
"error_message": zgrab2.mysql.mysql_scan_response["result"]["error_message"],
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake.")
}))
ztag_mongodb = ztag_zgrab2_transformed(service="MongoDB", results=SubRecord({
"build_info": SubRecord({
"version": WhitespaceAnalyzedString(doc="Version of mongodb server"),
"git_version": WhitespaceAnalyzedString(doc="Git Version of mongodb server"),
"max_wire_version": Signed32BitInteger(),
"build_environment": SubRecord({
"dist_mod": WhitespaceAnalyzedString(),
"dist_arch": WhitespaceAnalyzedString(),
"cc": WhitespaceAnalyzedString(),
"cc_flags": WhitespaceAnalyzedString(),
"cxx": WhitespaceAnalyzedString(),
"cxx_flags": WhitespaceAnalyzedString(),
"link_flags": WhitespaceAnalyzedString(),
"target_arch": WhitespaceAnalyzedString(),
"target_os": WhitespaceAnalyzedString()
})
}, doc="Result of issuing the buildInfo command see https://docs.mongodb.com/manual/reference/command/buildInfo"),
"is_master": SubRecord({
"is_master": Boolean(),
"max_wire_version": Signed32BitInteger(),
"min_wire_version": Signed32BitInteger(),
"max_bson_object_size": Signed32BitInteger(),
"max_write_batch_size": Signed32BitInteger(),
"logical_session_timeout_minutes": Signed32BitInteger(),
"max_message_size_bytes": Signed32BitInteger(),
"read_only": Boolean()
}, doc="Result of issuing the isMaster command see https://docs.mongodb.com/manual/reference/command/isMaster")
}))
ztag_postgres = ztag_zgrab2_transformed(service="PostgreSQL", results=SubRecord({
"supported_versions": WhitespaceAnalyzedString(doc="The error string returned by the "
"server in response to a "
"StartupMessage with "
"ProtocolVersion = 0.0"),
"protocol_error": zgrab2.postgres.postgres_error,
"startup_error": zgrab2.postgres.postgres_error,
"is_ssl": Boolean(doc="If the server supports TLS and the session was "
"updated to use TLS, this is true."),
"authentication_mode": zgrab2.postgres.postgres_auth_mode["mode"],
"backend_key_data": zgrab2.postgres.postgres_key_data,
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake.")
}))
ztag_ipp = ztag_zgrab2_transformed(service="IPP", results=SubRecord({
"version_major": zgrab2.ipp.ipp_scan_response["result"]["version_major"],
"version_minor": zgrab2.ipp.ipp_scan_response["result"]["version_minor"],
"version_string": zgrab2.ipp.ipp_scan_response["result"]["version_string"],
"cups_version": zgrab2.ipp.ipp_scan_response["result"]["cups_version"],
"attributes": zgrab2.ipp.ipp_scan_response["result"]["attributes"],
"attr_ipp_versions": zgrab2.ipp.ipp_scan_response["result"]["attr_ipp_versions"],
"attr_cups_version": zgrab2.ipp.ipp_scan_response["result"]["attr_cups_version"],
"attr_printer_uris": zgrab2.ipp.ipp_scan_response["result"]["attr_printer_uris"],
"tls": ztag_tls_type(doc="If the server allows upgrading the "
"session to use TLS, this is the log of "
"the handshake."),
}))
ztag_schemas = [
("ztag_https", ztag_tls),
("ztag_heartbleed", ztag_heartbleed),
("ztag_smtp_starttls", ztag_smtp_starttls),
("ztag_imap_starttls", ztag_mail_starttls),
("ztag_pop3_starttls", ztag_mail_starttls),
("ztag_imap_tls", ztag_mail_tls),
("ztag_pop3_tls", ztag_mail_tls),
("ztag_http", ztag_http),
("ztag_ftp", ztag_ftp),
("ztag_dh", ztag_dh),
("ztag_dh_export", ztag_dh_export),
("ztag_rsa_export", ztag_rsa_export),
("ztag_ecdh", ztag_ecdh),
("ztag_sslv3", ztag_tls_support),
("ztag_tls1", ztag_tls_support),
("ztag_tls2", ztag_tls_support),
("ztag_tls3", ztag_tls_support),
("ztag_modbus", ztag_modbus),
("ztag_ssh_v2", ztag_ssh_v2),
("ztag_dns_lookup", ztag_dns_lookup),
("ztag_bacnet", ztag_bacnet),
("ztag_fox", ztag_fox),
("ztag_dnp3", ztag_dnp3),
("ztag_s7", ztag_s7),
("ztag_smb", ztag_smb),
("ztag_upnp_discovery", ztag_upnp_discovery),
("ztag_oracle", ztag_oracle),
("ztag_mssql", ztag_mssql),
("ztag_ipp", ztag_ipp),
("ztag_mongodb", ztag_mongodb),
]
for (name, schema) in ztag_schemas:
x = Record({
"ip_address":IPAddress(required=True),
#"timestamp":Timestamp(required=True),
"tags":ListOf(String()),
"metadata": SubRecord({}, allow_unknown=True),
}, extends=schema)
zschema.registry.register_schema("%s" % name, x)
ztag_lookup_spf = SubRecord({
"raw":WhitespaceAnalyzedString(),
})
ztag_lookup_dmarc = SubRecord({
"raw":WhitespaceAnalyzedString(),
"p":String(),
})
ztag_lookup_axfr = SubRecord({
"servers":ListOf(SubRecord({
"server":String(),
"status":String(),
"name":FQDN(),
"support":Boolean(),
"error":WhitespaceAnalyzedString(),
"records":ListOf(SubRecord({
"algorithm":Unsigned16BitInteger(),
"answer":String(),
"class":String(),
"data":WhitespaceAnalyzedString(),
"digest":WhitespaceAnalyzedString(),
"digest_type":Unsigned16BitInteger(),
"expire":Unsigned32BitInteger(),
"flag":Unsigned16BitInteger(),
"flags":Unsigned16BitInteger(),
"key_tag":Unsigned16BitInteger(),
"mbox":FQDN(),
"min_ttl":Unsigned32BitInteger(),
"name":FQDN(),
"ns":FQDN(),
"preference":Signed16BitInteger(),
"protocol":Unsigned16BitInteger(),
"public_key":String(),
"refresh":Signed32BitInteger(),
"retry":Signed32BitInteger(),
"serial":Unsigned32BitInteger(),
"tag":String(),
"type":String(),
"ttl":Unsigned32BitInteger(),
# FIXME 2018/10/15: Conflict with auto-detected version in Elasticsearch (auto type
# FIXME 2018/10/15: is text, new type is keyword)
"value": String(exclude={"elasticsearch"}),
})),
})),
"truncated":Boolean(),
"support":Boolean()
})
_zdb_location_fields = {
"continent":String(),
"country":WhitespaceAnalyzedString(),
"country_code":String(),
"city":WhitespaceAnalyzedString(),
"postal_code":String(),
"timezone":WhitespaceAnalyzedString(),
"province":WhitespaceAnalyzedString(),
"latitude":Double(),
"longitude":Double(),
"registered_country":WhitespaceAnalyzedString(),
"registered_country_code":String(),
}
zdb_location = SubRecord(_zdb_location_fields, category="Location")
zdb_restricted_location = SubRecord(_zdb_location_fields, exclude=["bigquery",])
zdb_as = SubRecord({
"asn":Unsigned32BitInteger(),
"description":WhitespaceAnalyzedString(),
"path":ListOf(Unsigned32BitInteger()),
"rir":String(),
"routed_prefix":FQDN(),
"name":WhitespaceAnalyzedString(),
"country_code":String(),
"organization":WhitespaceAnalyzedString(),
})
__metadata = {}
for key in Annotation.GLOBAL_METADATA_KEYS:
__metadata[key] = WhitespaceAnalyzedString()
zdb_metadata = SubRecord(__metadata)
CTServerStatus = SubRecord({
"index":Signed64BitInteger(),
"added_to_ct_at":Timestamp(),
"ct_to_censys_at":Timestamp(),
"censys_to_ct_at":Timestamp(),
"sct":IndexedBinary(),
})
CTStatus = SubRecord({
"google_aviator":CTServerStatus,
"google_pilot":CTServerStatus,
"google_rocketeer":CTServerStatus,
"google_submariner":CTServerStatus,
"google_testtube":CTServerStatus,
"google_icarus":CTServerStatus,
"google_skydiver":CTServerStatus,
"google_daedalus":CTServerStatus,
"digicert_ct1":CTServerStatus,
"izenpe_com_ct":CTServerStatus,
"izenpe_eus_ct":CTServerStatus,
"symantec_ws_ct":CTServerStatus,
"symantec_ws_vega":CTServerStatus,
"wosign_ctlog":CTServerStatus,
"wosign_ct":CTServerStatus,
"cnnic_ctserver":CTServerStatus,
"gdca_ct":CTServerStatus,
"startssl_ct":CTServerStatus,
"certly_log":CTServerStatus,
"venafi_api_ctlog":CTServerStatus,
"symantec_ws_deneb":CTServerStatus,
"nordu_ct_plausible":CTServerStatus,
"comodo_dodo":CTServerStatus,
"comodo_mammoth":CTServerStatus,
"gdca_ctlog":CTServerStatus,
"symantec_ws_sirius":CTServerStatus,
"certificatetransparency_cn_ct":CTServerStatus,
"venafi_api_ctlog_gen2":CTServerStatus,
"digicert_ct2":CTServerStatus,
"comodo_sabre":CTServerStatus,
"sheca_ct":CTServerStatus,
"letsencrypt_ct_clicky":CTServerStatus,
"behind_the_sofa":CTServerStatus,
"gdca_log":CTServerStatus,
"gdca_log2":CTServerStatus,
"wotrus_ctlog":CTServerStatus,
"wotrus_ctlog3":CTServerStatus,
"akamai_ct":CTServerStatus,
"google_argon_2017":CTServerStatus,
"google_argon_2018":CTServerStatus,
"google_argon_2019":CTServerStatus,
"google_argon_2020":CTServerStatus,
"google_argon_2021":CTServerStatus,
"google_xenon_2018":CTServerStatus,
"google_xenon_2019":CTServerStatus,
"google_xenon_2020":CTServerStatus,
"google_xenon_2021":CTServerStatus,
"google_xenon_2022":CTServerStatus,
"cloudflare_nimbus_2017":CTServerStatus,
"cloudflare_nimbus_2018":CTServerStatus,
"cloudflare_nimbus_2019":CTServerStatus,
"cloudflare_nimbus_2020":CTServerStatus,
"cloudflare_nimbus_2021":CTServerStatus,
"digicert_nessie_2018":CTServerStatus,
"digicert_nessie_2019":CTServerStatus,
"digicert_nessie_2020":CTServerStatus,
"digicert_nessie_2021":CTServerStatus,
"digicert_nessie_2022":CTServerStatus,
"digicert_yeti_2018":CTServerStatus,
"digicert_yeti_2019":CTServerStatus,
"digicert_yeti_2020":CTServerStatus,
"digicert_yeti_2021":CTServerStatus,
"digicert_yeti_2022":CTServerStatus,
"digicert_golem":CTServerStatus,
"izenpe_com_pilot":CTServerStatus,
"letsencrypt_ct_birch":CTServerStatus,
"letsencrypt_ct_faux":CTServerStatus,
"letsencrypt_ct_oak":CTServerStatus,
"nordu_ct_flimsy":CTServerStatus,
"sheca_ctlog":CTServerStatus,
"wosign_ctlog2":CTServerStatus,
"wosign_ctlog3":CTServerStatus,
"ctlogs_alpha":CTServerStatus,
})
CertificateAudit = SubRecord({
"ccadb":SubRecord({
"current_in_intermediates":Boolean(),
"was_in_intermediates":Boolean(),
"owner_name":WhitespaceAnalyzedString(),
"parent_name":WhitespaceAnalyzedString(),
"certificate_name":WhitespaceAnalyzedString(),
"certificate_policy":WhitespaceAnalyzedString(),
"certification_practice_statement":WhitespaceAnalyzedString(),
"cp_same_as_parent":Boolean(),
"audit_same_as_parent":Boolean(),
"standard_audit":WhitespaceAnalyzedString(),
"br_audit":WhitespaceAnalyzedString(),
"auditor":WhitespaceAnalyzedString(),
"standard_audit_statement_timestamp":Timestamp(),
"management_assertions_by":WhitespaceAnalyzedString(),
"comments":EnglishString(es_include_raw=True),
"ev_policy_oids":WhitespaceAnalyzedString(),
"approval_bug":WhitespaceAnalyzedString(),
"first_nss_release":WhitespaceAnalyzedString(),
"first_firefox_release":WhitespaceAnalyzedString(),
"ev_audit":WhitespaceAnalyzedString(),
"current_in_roots":Boolean(),
"was_in_roots":Boolean(),
"test_website_valid":WhitespaceAnalyzedString(),
"mozilla_applied_constraints":WhitespaceAnalyzedString(),
"company_website":WhitespaceAnalyzedString(),
"geographic_focus":WhitespaceAnalyzedString(),
"standard_audit_type":WhitespaceAnalyzedString(),
}, category="CCADB Audit")
})
ztag_certificate_validation = SubRecord({
"valid":Boolean(doc="((has_trusted_path && !revoked && !blacklisted) || whitelisted) && !expired"),
"was_valid":Boolean(doc="True if the certificate is valid now or was ever valid in the past."),
"trusted_path":Boolean(doc="True if there exists a path from the certificate to the root store."),
"had_trusted_path":Boolean(doc="True if now or at some point in the past there existed a path "
"from the certificate to the root store."),
"blacklisted":Boolean(doc="True if the certificate is explicitly blacklisted by some method than OneCRL/CRLSet. "
"For example, a set of certificates revoked by Cloudflare are blacklisted by SPKI hash in Chrome."),
"whitelisted":Boolean(doc="True if the certificate is explicitly whitelisted, "
"e.g. the set of trusted WoSign certificates Apple uses."),
"type":Enum(["leaf","intermediate","root","unknown"], doc="Indicates if the certificate is a root, intermediate, or leaf."),
"paths":NestedListOf(HexString(), "path", validation_policy="ignore"),
"in_revocation_set":Boolean(doc="True if the certificate is in the revocation set (e.g. OneCRL) associated with this root store."),
"parents":ListOf(HexString()),
})
class LintBool(String):
ES_TYPE = "boolean"
# Lints can have any of the following outputs:
# - RESERVED [should never happen]
# - NA [not applicable]
# - NE [not in effect when the certificate was issued]
# - PASS [test success]
# - INFO [lint fired at informational level]
# - WARN [lint fired at warning level]
# - FAIL [lint fired at error level]
# - FATAL [test could not complete because the cert is broken]
# - UNKNOWN [should never occur]
# We don't want to store a string for every lint in Elasticsearch because
# our index size would explode. Instead we map these to a nullable boolean:
# {
#   (reserved, unknown, ne, na, pass) -> null,
#   (info, warn, fail, fatal) -> true
# }
# For BigQuery, we have more options, so we allow some more information:
# {
#   all map to the original value
# }
# This is horrible to schema, so define a custom type.
Lints = SubRecord({
"e_basic_constraints_not_critical":LintBool(),
"e_ca_common_name_missing":LintBool(),
"e_ca_country_name_invalid":LintBool(),
"e_ca_country_name_missing":LintBool(),
"e_ca_crl_sign_not_set":LintBool(),
"e_ca_is_ca":LintBool(),
"e_ca_key_cert_sign_not_set":LintBool(),
"e_ca_key_usage_missing":LintBool(),
"e_ca_key_usage_not_critical":LintBool(),
"e_ca_organization_name_missing":LintBool(),
"e_ca_subject_field_empty":LintBool(),
"e_cab_dv_conflicts_with_locality":LintBool(),
"e_cab_dv_conflicts_with_org":LintBool(),
"e_cab_dv_conflicts_with_postal":LintBool(),
"e_cab_dv_conflicts_with_province":LintBool(),
"e_cab_dv_conflicts_with_street":LintBool(),
"e_cab_iv_requires_personal_name":LintBool(),
"e_cab_ov_requires_org":LintBool(),
"e_cert_contains_unique_identifier":LintBool(),
"e_cert_extensions_version_not_3":LintBool(),
"e_cert_policy_iv_requires_country":LintBool(),
"e_cert_policy_iv_requires_province_or_locality":LintBool(),
"e_cert_policy_ov_requires_country":LintBool(),
"e_cert_policy_ov_requires_province_or_locality":LintBool(),
"e_cert_unique_identifier_version_not_2_or_3":LintBool(),
"e_distribution_point_incomplete":LintBool(),
"e_dnsname_bad_character_in_label":LintBool(),
"e_dnsname_contains_bare_iana_suffix":LintBool(),
"e_dnsname_empty_label":LintBool(),
"e_dnsname_hyphen_in_sld":LintBool(),
"e_dnsname_label_too_long":LintBool(),
"e_dnsname_left_label_wildcard_correct":LintBool(),
"e_dnsname_not_valid_tld":LintBool(),
"e_dnsname_underscore_in_sld":LintBool(),
"e_dnsname_wildcard_only_in_left_label":LintBool(),
"e_dsa_correct_order_in_subgroup":LintBool(),
"e_dsa_improper_modulus_or_divisor_size":LintBool(),
"e_dsa_params_missing":LintBool(),
"e_dsa_shorter_than_2048_bits":LintBool(),
"e_dsa_unique_correct_representation":LintBool(),
"e_ec_improper_curves":LintBool(),
"e_ev_business_category_missing":LintBool(),
"e_ev_country_name_missing":LintBool(),
"e_ev_locality_name_missing":LintBool(),
"e_ev_organization_name_missing":LintBool(),
"e_ev_serial_number_missing":LintBool(),
"e_ev_valid_time_too_long":LintBool(),
"e_ext_aia_marked_critical":LintBool(),
"e_ext_authority_key_identifier_critical":LintBool(),
"e_ext_authority_key_identifier_missing":LintBool(),
"e_ext_authority_key_identifier_no_key_identifier":LintBool(),
"e_ext_cert_policy_disallowed_any_policy_qualifier":LintBool(),
"e_ext_cert_policy_duplicate":LintBool(),
"e_ext_cert_policy_explicit_text_ia5_string":LintBool(),
"e_ext_cert_policy_explicit_text_too_long":LintBool(),
"e_ext_duplicate_extension":LintBool(),
"e_ext_freshest_crl_marked_critical":LintBool(),
"e_ext_ian_dns_not_ia5_string":LintBool(),
"e_ext_ian_empty_name":LintBool(),
"e_ext_ian_no_entries":LintBool(),
"e_ext_ian_rfc822_format_invalid":LintBool(),
"e_ext_ian_space_dns_name":LintBool(),
"e_ext_ian_uri_format_invalid":LintBool(),
"e_ext_ian_uri_host_not_fqdn_or_ip":LintBool(),
"e_ext_ian_uri_not_ia5":LintBool(),
"e_ext_ian_uri_relative":LintBool(),
"e_ext_key_usage_cert_sign_without_ca":LintBool(),
"e_ext_key_usage_without_bits":LintBool(),
"e_ext_name_constraints_not_critical":LintBool(),
"e_ext_name_constraints_not_in_ca":LintBool(),
"e_ext_policy_constraints_empty":LintBool(),
"e_ext_policy_constraints_not_critical":LintBool(),
"e_ext_policy_map_any_policy":LintBool(),
"e_ext_san_contains_reserved_ip":LintBool(),
"e_ext_san_directory_name_present":LintBool(),
"e_ext_san_dns_name_too_long":LintBool(),
"e_ext_san_dns_not_ia5_string":LintBool(),
"e_ext_san_edi_party_name_present":LintBool(),
"e_ext_san_empty_name":LintBool(),
"e_ext_san_missing":LintBool(),
"e_ext_san_no_entries":LintBool(),
"e_ext_san_not_critical_without_subject":LintBool(),
"e_ext_san_other_name_present":LintBool(),
"e_ext_san_registered_id_present":LintBool(),
"e_ext_san_rfc822_format_invalid":LintBool(),
"e_ext_san_rfc822_name_present":LintBool(),
"e_ext_san_space_dns_name":LintBool(),
"e_ext_san_uniform_resource_identifier_present":LintBool(),
"e_ext_san_uri_format_invalid":LintBool(),
"e_ext_san_uri_host_not_fqdn_or_ip":LintBool(),
"e_ext_san_uri_not_ia5":LintBool(),
"e_ext_san_uri_relative":LintBool(),
"e_ext_subject_directory_attr_critical":LintBool(),
"e_ext_subject_key_identifier_critical":LintBool(),
"e_ext_subject_key_identifier_missing_ca":LintBool(),
"e_generalized_time_does_not_include_seconds":LintBool(),
"e_generalized_time_includes_fraction_seconds":LintBool(),
"e_generalized_time_not_in_zulu":LintBool(),
"e_ian_bare_wildcard":LintBool(),
"e_ian_dns_name_includes_null_char":LintBool(),
"e_ian_dns_name_starts_with_period":LintBool(),
"e_ian_wildcard_not_first":LintBool(),
"e_inhibit_any_policy_not_critical":LintBool(),
"e_international_dns_name_not_nfkc":LintBool(),
"e_international_dns_name_not_unicode":LintBool(),
"e_invalid_certificate_version":LintBool(),
"e_issuer_field_empty":LintBool(),
"e_name_constraint_empty":LintBool(),
"e_name_constraint_maximum_not_absent":LintBool(),
"e_name_constraint_minimum_non_zero":LintBool(),
"e_old_root_ca_rsa_mod_less_than_2048_bits":LintBool(),
"e_old_sub_ca_rsa_mod_less_than_1024_bits":LintBool(),
"e_old_sub_cert_rsa_mod_less_than_1024_bits":LintBool(),
"e_path_len_constraint_improperly_included":LintBool(),
"e_path_len_constraint_zero_or_less":LintBool(),
"e_public_key_type_not_allowed":LintBool(),
"e_root_ca_extended_key_usage_present":LintBool(),
"e_root_ca_key_usage_must_be_critical":LintBool(),
"e_root_ca_key_usage_present":LintBool(),
"e_rsa_exp_negative":LintBool(),
"e_rsa_mod_less_than_2048_bits":LintBool(),
"e_rsa_no_public_key":LintBool(),
"e_rsa_public_exponent_not_odd":LintBool(),
"e_rsa_public_exponent_too_small":LintBool(),
"e_san_bare_wildcard":LintBool(),
"e_san_dns_name_includes_null_char":LintBool(),
"e_san_dns_name_starts_with_period":LintBool(),
"e_san_wildcard_not_first":LintBool(),
"e_serial_number_longer_than_20_octets":LintBool(),
"e_serial_number_not_positive":LintBool(),
"e_signature_algorithm_not_supported":LintBool(),
"e_sub_ca_aia_does_not_contain_ocsp_url":LintBool(),
"e_sub_ca_aia_marked_critical":LintBool(),
"e_sub_ca_aia_missing":LintBool(),
"e_sub_ca_certificate_policies_missing":LintBool(),
"e_sub_ca_crl_distribution_points_does_not_contain_url":LintBool(),
"e_sub_ca_crl_distribution_points_marked_critical":LintBool(),
"e_sub_ca_crl_distribution_points_missing":LintBool(),
"e_sub_ca_eku_missing":LintBool(),
"e_sub_ca_eku_name_constraints":LintBool(),
"e_sub_ca_must_not_contain_any_policy":LintBool(),
"e_sub_cert_aia_does_not_contain_ocsp_url":LintBool(),
"e_sub_cert_aia_marked_critical":LintBool(),
"e_sub_cert_aia_missing":LintBool(),
"e_sub_cert_cert_policy_empty":LintBool(),
"e_sub_cert_certificate_policies_missing":LintBool(),
"e_sub_cert_country_name_must_appear":LintBool(),
"e_sub_cert_crl_distribution_points_does_not_contain_url":LintBool(),
"e_sub_cert_crl_distribution_points_marked_critical":LintBool(),
"e_sub_cert_eku_missing":LintBool(),
"e_sub_cert_eku_server_auth_client_auth_missing":LintBool(),
"e_sub_cert_given_name_surname_contains_correct_policy":LintBool(),
"e_sub_cert_key_usage_cert_sign_bit_set":LintBool(),
"e_sub_cert_key_usage_crl_sign_bit_set":LintBool(),
"e_sub_cert_locality_name_must_appear":LintBool(),
"e_sub_cert_locality_name_must_not_appear":LintBool(),
"e_sub_cert_not_is_ca":LintBool(),
"e_sub_cert_or_sub_ca_using_sha1":LintBool(),
"e_sub_cert_postal_code_must_not_appear":LintBool(),
"e_sub_cert_province_must_appear":LintBool(),
"e_sub_cert_province_must_not_appear":LintBool(),
"e_sub_cert_street_address_should_not_exist":LintBool(),
"e_sub_cert_valid_time_too_long":LintBool(),
"e_subject_common_name_max_length":LintBool(),
"e_subject_common_name_not_from_san":LintBool(),
"e_subject_contains_noninformational_value":LintBool(),
"e_subject_contains_reserved_ip":LintBool(),
"e_subject_country_not_iso":LintBool(),
"e_subject_empty_without_san":LintBool(),
"e_subject_info_access_marked_critical":LintBool(),
"e_subject_locality_name_max_length":LintBool(),
"e_subject_not_dn":LintBool(),
"e_subject_organization_name_max_length":LintBool(),
"e_subject_organizational_unit_name_max_length":LintBool(),
"e_subject_state_name_max_length":LintBool(),
"e_utc_time_does_not_include_seconds":LintBool(),
"e_utc_time_not_in_zulu":LintBool(),
"e_validity_time_not_positive":LintBool(),
"e_wrong_time_format_pre2050":LintBool(),
"n_ca_digital_signature_not_set":LintBool(),
"n_contains_redacted_dnsname":LintBool(),
"n_sub_ca_eku_not_technically_constrained":LintBool(),
"n_subject_common_name_included":LintBool(),
"w_distribution_point_missing_ldap_or_uri":LintBool(),
"w_dnsname_underscore_in_trd":LintBool(),
"w_dnsname_wildcard_left_of_public_suffix":LintBool(),
"w_eku_critical_improperly":LintBool(),
"w_ext_aia_access_location_missing":LintBool(),
"w_ext_cert_policy_contains_noticeref":LintBool(),
"w_ext_cert_policy_explicit_text_includes_control":LintBool(),
"w_ext_cert_policy_explicit_text_not_nfc":LintBool(),
"w_ext_cert_policy_explicit_text_not_utf8":LintBool(),
"w_ext_crl_distribution_marked_critical":LintBool(),
"w_ext_ian_critical":LintBool(),
"w_ext_key_usage_not_critical":LintBool(),
"w_ext_policy_map_not_critical":LintBool(),
"w_ext_policy_map_not_in_cert_policy":LintBool(),
"w_ext_san_critical_with_subject_dn":LintBool(),
"w_ext_subject_key_identifier_missing_sub_cert":LintBool(),
"w_ian_iana_pub_suffix_empty":LintBool(),
"w_issuer_dn_leading_whitespace":LintBool(),
"w_issuer_dn_trailing_whitespace":LintBool(),
"w_multiple_issuer_rdn":LintBool(),
"w_multiple_subject_rdn":LintBool(),
"w_name_constraint_on_edi_party_name":LintBool(),
"w_name_constraint_on_registered_id":LintBool(),
"w_name_constraint_on_x400":LintBool(),
"w_root_ca_basic_constraints_path_len_constraint_field_present":LintBool(),
"w_root_ca_contains_cert_policy":LintBool(),
"w_rsa_mod_factors_smaller_than_752":LintBool(),
"w_rsa_mod_not_odd":LintBool(),
"w_rsa_public_exponent_not_in_range":LintBool(),
"w_san_iana_pub_suffix_empty":LintBool(),
"w_serial_number_low_entropy":LintBool(),
"w_sub_ca_aia_does_not_contain_issuing_ca_url":LintBool(),
"w_sub_ca_certificate_policies_marked_critical":LintBool(),
"w_sub_ca_eku_critical":LintBool(),
"w_sub_ca_name_constraints_not_critical":LintBool(),
"w_sub_cert_aia_does_not_contain_issuing_ca_url":LintBool(),
"w_sub_cert_certificate_policies_marked_critical":LintBool(),
"w_sub_cert_eku_extra_values":LintBool(),
"w_sub_cert_sha1_expiration_too_long":LintBool(),
"w_subject_dn_leading_whitespace":LintBool(),
"w_subject_dn_trailing_whitespace":LintBool(),
}, validation_policy="ignore")
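# Hedged sketch (not part of the original schema): one way the lint-status
# collapse described above for LintBool could be applied at transform time.
# The helper name and the lowercase status strings are assumptions of this
# sketch; only the target mapping (Elasticsearch gets a nullable boolean,
# BigQuery keeps the original value) comes from the comments above.
def _lint_status_to_es(status):
    # Statuses that carry no failure signal collapse to null in Elasticsearch.
    if status.lower() in ("reserved", "unknown", "ne", "na", "pass"):
        return None
    # info/warn/fail/fatal all collapse to a bare "this lint fired" flag.
    return True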
ZLint = SubRecord({
# version is an int64 in the protobuf
"version":Unsigned16BitInteger(validation_policy="ignore"),
"notices_present":Boolean(),
"warnings_present":Boolean(),
"errors_present":Boolean(),
"fatals_present":Boolean(),
"lints":Lints,
})
certificate = Record({
"parsed": SubRecord({
"__expanded_names": ListOf(String()),
}, extends=zcrypto.ParsedCertificate()),
"raw":Binary(),
"fingerprint_sha256":HexString(),
"tags":ListOf(WhitespaceAnalyzedString()),
"metadata":SubRecord({
"updated_at":Timestamp(),
"added_at":Timestamp(),
"post_processed":Boolean(),
"post_processed_at":Timestamp(),
"seen_in_scan":Boolean(),
"source":String(),
"parse_version":Unsigned16BitInteger(),
"parse_error":WhitespaceAnalyzedString(),
"parse_status":String(),
}, category="Metadata"),
"parents":ListOf(String(), category="Misc"),
"parent_spki_subject_fingerprint":HexString(),
"validation":SubRecord({
"nss":ztag_certificate_validation.new(category="NSS (Firefox) Validation"),
"apple":ztag_certificate_validation.new(category="Apple Validation"),
"microsoft":ztag_certificate_validation.new(category="Microsoft Validation"),
#"java":ztag_certificate_validation,
#"android":ztag_certificate_validation,
"google_ct_primary":ztag_certificate_validation.new(category="Google CT Validation"),
#"google_ct_submariner":ztag_certificate_validation,
}),
"ct":CTStatus.new(category="Certificate Transparency Logs"),
# TODO: 2018/08/14 -- ccadb data is not being loaded, so hold off on creating this schema.
# "audit":CertificateAudit,
"zlint":ZLint.new(category="ZLint"),
"precert":Boolean(category="Misc")
})
zschema.registry.register_schema("certificate", certificate)
ipv4_host = Record({
Port(443):SubRecord({
"https":SubRecord({
"tls":ztag_tls,
"get":ztag_http,
"heartbleed":ztag_heartbleed,
"dhe": ztag_dh,
"rsa_export": ztag_rsa_export,
"dhe_export": ztag_dh_export,
"ssl_3": ztag_tls_support,
"tls_1_1": ztag_tls_support,
"tls_1_2": ztag_tls_support,
"ecdhe": ztag_ecdh,
}, category="443/HTTPS")
}),
Port(80):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="80/HTTP"),
}),
Port(8080):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="8080/HTTP"),
}),
Port(8888):SubRecord({
"http":SubRecord({
"get":ztag_http,
}, category="8888/HTTP"),
}),
Port(25):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
}, category="25/SMTP"),
}),
Port(23):SubRecord({
"telnet":SubRecord({
"banner":ztag_telnet
}, category="23/Telnet")
}),
Port(2323):SubRecord({
"telnet":SubRecord({
"banner":ztag_telnet
}, category="2323/Telnet")
}),
Port(21):SubRecord({
"ftp":SubRecord({
"banner":ztag_ftp,
}, category="21/FTP")
}),
Port(102):SubRecord({
"s7":SubRecord({
"szl":ztag_s7
}, category="102/S7")
}),
Port(110):SubRecord({
"pop3":SubRecord({
"starttls":ztag_mail_starttls,
}, category="110/POP3")
}),
Port(143):SubRecord({
"imap":SubRecord({
"starttls":ztag_mail_starttls,
}, category="143/IMAP")
}),
Port(445):SubRecord({
"smb":SubRecord({
"banner":ztag_smb
}, category="445/SMB", validation_policy="error")
}),
Port(993):SubRecord({
"imaps":SubRecord({
"tls":ztag_mail_tls,
}, category="993/IMAPS")
}),
Port(995):SubRecord({
"pop3s":SubRecord({
"tls":ztag_mail_tls,
}, category="995/POP3S")
}),
Port(587):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
}, category="587/SMTP")
}),
Port(502):SubRecord({
"modbus":SubRecord({
"device_id":ztag_modbus
}, category="502/Modbus")
}),
Port(22):SubRecord({
"ssh":SubRecord({
"v2": ztag_ssh_v2
}, category="22/SSH"),
}),
Port(53):SubRecord({
"dns":SubRecord({
"lookup":ztag_dns_lookup
}, category="53/DNS")
}),
Port(47808):SubRecord({
"bacnet":SubRecord({
"device_id":ztag_bacnet
}, category="47808/BACNET")
}),
Port(1911):SubRecord({
"fox":SubRecord({
"device_id":ztag_fox
}, category="1911/Fox")
}),
Port(20000):SubRecord({
"dnp3":SubRecord({
"status":ztag_dnp3,
}, category="20000/DNP3")
}),
Port(7547):SubRecord({
"cwmp":SubRecord({
"get":ztag_http,
}, category="7547/CWMP")
}),
Port(1900):SubRecord({
"upnp":SubRecord({
"discovery":ztag_upnp_discovery,
}, category="1900/UPnP")
}),
Port(1521):SubRecord({
"oracle":SubRecord({
"banner": ztag_oracle,
}, category="1521/Oracle"),
}),
Port(1433):SubRecord({
"mssql":SubRecord({
"banner": ztag_mssql,
}, category="1433/MSSQL"),
}),
Port(3306): SubRecord({
"mysql": SubRecord({
"banner": ztag_mysql,
}, category="3306/MySQL"),
}),
Port(27017): SubRecord({
"mongodb": SubRecord({
"banner": ztag_mongodb ,
}, category="27017/MongoDB"),
}),
Port(5432): SubRecord({
"postgres": SubRecord({
"banner": ztag_postgres,
}, category="5432/Postgres"),
}),
Port(631): SubRecord({
"ipp": SubRecord({
"banner": ztag_ipp,
}, category="631/IPP"),
}),
"tags":ListOf(WhitespaceAnalyzedString(), category="Basic Information"),
"metadata":zdb_metadata,
"location":zdb_location,
"__restricted_location":zdb_restricted_location,
"autonomous_system":zdb_as.new(category="Basic Information"),
"notes":WhitespaceAnalyzedString(),
"ip":IPv4Address(required=True, category="Basic Information"),
"ipint":Unsigned32BitInteger(required=True, doc="Integer value of IP address in host order"),
"updated_at":Timestamp(),
"zdb_version":Unsigned32BitInteger(),
"protocols":ListOf(String(), category="Basic Information"),
"ports":ListOf(Unsigned16BitInteger())
})
website = Record({
Port(443):SubRecord({
"https":SubRecord({
"get":ztag_http,
"tls":ztag_tls,
"heartbleed":ztag_heartbleed,
"dhe": ztag_dh,
"rsa_export": ztag_rsa_export,
"dhe_export": ztag_dh_export,
"ssl_3": ztag_tls_support,
"tls_1_1": ztag_tls_support,
"tls_1_2": ztag_tls_support,
"ecdhe": ztag_ecdh,
}),
"https_www":SubRecord({
"tls":ztag_tls,
"get":ztag_http,
})
}, category="443/HTTPS"),
Port(80):SubRecord({
"http":SubRecord({
"get":ztag_http,
}),
"http_www":SubRecord({
"get":ztag_http,
}),
}, category="80/HTTP"),
Port(25):SubRecord({
"smtp":SubRecord({
"starttls": ztag_smtp_starttls,
})
}, category="25/SMTP"),
Port(0):SubRecord({
"lookup":SubRecord({
"spf":ztag_lookup_spf,
"dmarc":ztag_lookup_dmarc,
"axfr":ztag_lookup_axfr,
})
}, category="Basic Information"),
"tags":ListOf(WhitespaceAnalyzedString(), category="Basic Information"),
"metadata":zdb_metadata,
"notes":EnglishString(es_include_raw=True),
"domain":String(category="Basic Information"),
"alexa_rank":Unsigned32BitInteger(doc="Rank in the Alexa Top 1 Million. "
"Null if not currently in the Top 1 Million sites.",
category="Basic Information"),
"updated_at":Timestamp(),
"zdb_version":Unsigned32BitInteger(),
"protocols":ListOf(String(), category="Basic Information"),
"ports":ListOf(Unsigned16BitInteger())
})
DROP_KEYS = {'ip_address', 'metadata', 'tags', 'timestamp'}
zschema.registry.register_schema("ipv4host", ipv4_host)
zschema.registry.register_schema("website", website)
# --- id: 185828 ---
from fastapi import FastAPI, Request
from starlette.responses import JSONResponse
app = FastAPI()
@app.exception_handler(Exception)
async def generic_exception_handler(request: Request, exc: Exception):
return JSONResponse(status_code=200, content="It doesn't work!")
@app.get("/")
async def home():
raise Exception()
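# Hedged usage sketch (assumed, not part of the original snippet): exercising
# the handler with Starlette's test client, which fastapi.testclient re-exports.
# raise_server_exceptions=False is needed because the catch-all Exception
# handler still re-raises inside the test client by default.
from fastapi.testclient import TestClient

client = TestClient(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 200
assert response.json() == "It doesn't work!"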
# --- id: 185889 ---
from .inlinecallbackbutton import InlineCallbackButton
from .inlinecallbackhandler import InlineCallbackHandler
from .inlineactionhandler import InlineActionHandler
# --- id: 185895 ---
import os
import struct
import sys
import pytest
from bonsai.active_directory.acl import ACL
from bonsai.active_directory.sid import SID
from conftest import get_config
from bonsai import LDAPClient
from bonsai.active_directory import SecurityDescriptor
@pytest.fixture
def client():
""" Create a client with authentication settings. """
cfg = get_config()
url = f"ldap://{cfg['SERVER']['hostname']}:{cfg['SERVER']['port']}"
client = LDAPClient(url)
client.set_credentials(
"SIMPLE", user=cfg["SIMPLEAUTH"]["user"], password=cfg["SIMPLEAUTH"]["password"]
)
return client
def test_from_binary():
""" Test from_binary method. """
with pytest.raises(TypeError):
_ = SecurityDescriptor.from_binary(0)
with pytest.raises(TypeError):
_ = SecurityDescriptor.from_binary("INVALID")
with pytest.raises(ValueError):
_ = SecurityDescriptor.from_binary(b"\x05\nH\x00\x07\x00")
curdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curdir, "testenv", "sd-sample0.bin"), "rb") as data:
input_data = data.read()
sec_desc = SecurityDescriptor.from_binary(input_data)
assert sec_desc.revision == 1
assert sec_desc.group_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.owner_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.sbz1 == 0
assert sec_desc.control["dacl_present"]
assert len(sec_desc.dacl.aces) == 24
assert sec_desc.dacl.aces[0].type == 5
assert str(sec_desc.dacl.aces[0].trustee_sid) == "S-1-5-32-554"
assert not sec_desc.control["sacl_present"]
assert sec_desc.sacl is None
with open(os.path.join(curdir, "testenv", "sd-sample1.bin"), "rb") as data:
input_data = data.read()
sec_desc = SecurityDescriptor.from_binary(input_data)
assert sec_desc.revision == 1
assert sec_desc.group_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.owner_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.sbz1 == 0
assert sec_desc.control["dacl_present"]
assert len(sec_desc.dacl.aces) == 24
assert sec_desc.dacl.aces[0].type == 5
assert sec_desc.dacl.aces[0].trustee_sid == "S-1-5-32-554"
assert sec_desc.control["sacl_present"]
assert len(sec_desc.sacl.aces) == 3
assert sec_desc.sacl.aces[0].type == 2
assert sec_desc.sacl.aces[0].trustee_sid == "S-1-1-0"
@pytest.mark.parametrize(
"file", ["sd-sample0.bin", "sd-sample1.bin"],
)
def test_to_binary(file):
""" Test to_binary method. """
curdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curdir, "testenv", file), "rb") as data:
expected_data = data.read()
test_sec_desc = SecurityDescriptor.from_binary(expected_data)
test_data = test_sec_desc.to_binary()
(
expected_rev,
expected_sbz1,
expected_ctrl,
expected_offset_owner,
expected_offset_group,
expected_offset_sacl,
expected_offset_dacl,
) = struct.unpack("<BBHIIII", expected_data[:20])
(
test_rev,
test_sbz1,
test_ctrl,
test_offset_owner,
test_offset_group,
test_offset_sacl,
test_offset_dacl,
) = struct.unpack("<BBHIIII", test_data[:20])
assert len(test_data) == len(expected_data)
assert test_rev == expected_rev
assert test_sbz1 == expected_sbz1
assert test_ctrl == expected_ctrl
if expected_offset_owner:
assert (
test_data[
test_offset_owner : test_offset_owner + test_sec_desc.owner_sid.size
]
== expected_data[
expected_offset_owner : expected_offset_owner
+ test_sec_desc.owner_sid.size
]
)
if expected_offset_group:
assert (
test_data[
test_offset_group : test_offset_group + test_sec_desc.group_sid.size
]
== expected_data[
expected_offset_group : expected_offset_group
+ test_sec_desc.group_sid.size
]
)
if expected_offset_sacl:
assert (
test_data[test_offset_sacl : test_offset_sacl + test_sec_desc.sacl.size]
== expected_data[
expected_offset_sacl : expected_offset_sacl
+ test_sec_desc.sacl.size
]
)
if expected_offset_dacl:
assert (
test_data[test_offset_dacl : test_offset_dacl + test_sec_desc.dacl.size]
== expected_data[
expected_offset_dacl : expected_offset_dacl
+ test_sec_desc.dacl.size
]
)
assert SecurityDescriptor.from_binary(test_data).to_binary() == test_data
@pytest.mark.skipif(
not sys.platform.startswith("win"),
reason="Cannot query SecurityDescriptor from OpenLDAP",
)
@pytest.mark.parametrize(
"sd_flags, owner_sid, group_sid, dacl, sacl",
[
(1, True, False, False, False),
(2, False, True, False, False),
(3, True, True, False, False),
(4, False, False, True, False),
(8, False, False, False, True),
(15, True, True, True, True),
],
ids=["only-owner", "only-group", "owner-group", "only-dacl", "only-sacl", "all"],
)
def test_sd_flags(client, sd_flags, owner_sid, group_sid, dacl, sacl):
""" Test LDAP_SERVER_SD_FLAGS_OID control """
client.sd_flags = sd_flags
with client.connect() as conn:
res = conn.search(
"cn=chuck,ou=nerdherd,dc=bonsai,dc=test",
0,
attrlist=["nTSecurityDescriptor"],
)[0]
sec_desc = SecurityDescriptor.from_binary(res["nTSecurityDescriptor"][0])
assert sec_desc.revision == 1
if owner_sid:
assert sec_desc.owner_sid is not None
assert isinstance(sec_desc.owner_sid, SID)
else:
assert sec_desc.owner_sid is None
if group_sid:
assert sec_desc.group_sid is not None
assert isinstance(sec_desc.group_sid, SID)
else:
assert sec_desc.group_sid is None
assert sec_desc.control["dacl_present"] is dacl
if dacl:
assert isinstance(sec_desc.dacl, ACL)
else:
assert sec_desc.dacl is None
assert sec_desc.control["sacl_present"] is sacl
if sacl:
assert isinstance(sec_desc.sacl, ACL)
else:
assert sec_desc.sacl is None
# --- id: 185899 ---
import yaml
class MotionRecorder:
def __init__(self, input_file=None):
self.data = {}
if input_file:
with open(input_file, 'r') as file:
self.data = yaml.safe_load(file)
def append(self, object_name, pose):
if object_name not in self.data:
self.data[object_name] = []
self.data[object_name].append(pose.tolist())
def get(self, object_name):
return self.data[object_name] if object_name in self.data else None
def save(self, output_file):
with open(output_file, 'w') as file:
yaml.safe_dump(self.data, file)
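# Hedged usage sketch (not part of the original class): append() only requires
# the pose to expose .tolist(), so a numpy array works; the object name, pose
# values, and file name below are hypothetical.
import numpy as np

recorder = MotionRecorder()
recorder.append("cube", np.array([0.1, 0.2, 0.3, 0.0, 0.0, 0.0, 1.0]))
recorder.save("motion.yaml")
replayed = MotionRecorder("motion.yaml")
assert replayed.get("cube") == [[0.1, 0.2, 0.3, 0.0, 0.0, 0.0, 1.0]]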
# --- id: 185931 ---
import numpy as np
import torch
"""
This class define the parent class of operation
Author: SunnerLi
"""
class OP():
"""
The parent class of each operation
The goal of this class is to adapting with different input format
"""
    def work(self, tensor):
        """
        The virtual function; child classes define the actual processing here.
        Arg: tensor - The np.ndarray object (rank CHW) you want to operate on
        """
        raise NotImplementedError("You should define your own function in the class!")
    def __call__(self, tensor):
        """
        This function defines how the operation proceeds.
        Four input formats are accepted for the tensor parameter:
            1. torch.Tensor with rank CHW
            2. np.ndarray with rank CHW
            3. torch.Tensor with rank TCHW
            4. np.ndarray with rank TCHW
        Arg: tensor - The tensor you want to operate on
        Ret: The operated tensor
        """
isTensor = type(tensor) == torch.Tensor
if isTensor:
tensor_type = tensor.type()
tensor = tensor.cpu().data.numpy()
if len(tensor.shape) == 3:
tensor = self.work(tensor)
elif len(tensor.shape) == 4:
tensor = np.asarray([self.work(_) for _ in tensor])
else:
raise Exception("We dont support the rank format {}".format(tensor.shape),
"If the rank of the tensor shape is only 2, you can call 'GrayStack()'")
if isTensor:
tensor = torch.from_numpy(tensor)
tensor = tensor.type(tensor_type)
return tensor
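# Hedged sketch of a concrete child class (not from the original file): a
# horizontal flip implemented through the work() hook. HorizontalFlip is a
# hypothetical name; it relies only on the CHW ndarray contract described above.
class HorizontalFlip(OP):
    def work(self, tensor):
        # tensor is a CHW np.ndarray; flip the width axis and copy so the
        # result has positive strides (torch.from_numpy requires that).
        return tensor[:, :, ::-1].copy()

# CHW torch.Tensor in -> torch.Tensor out; TCHW np.ndarray in -> np.ndarray out.
flipped = HorizontalFlip()(torch.rand(3, 8, 8))
flipped_batch = HorizontalFlip()(np.random.rand(2, 3, 8, 8))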
# --- id: 185974 ---
import time
from pkg_resources import resource_filename
from typing import Dict, Union, List, Tuple, Optional, NamedTuple, Set, Iterable, Sequence
import dtale
import dtale.views
import dtale.global_state
from autoplot.extensions.autoplot_display import AutoplotDisplay
from autoplot.extensions.toast import Toast, ToastType
from IPython.display import clear_output, display, Image
import pandas as pd
from autoplot.view_manager import View
DataId = str
VariableName = str
class VarData(NamedTuple):
pdf: Union[pd.Series, pd.DataFrame]
dd: dtale.views.DtaleData
class DTaler(View):
"""
    Implements the dtale backend for this project. This class will call
    dtale.show for each new dataframe; given dtale's implementation, that means
    we will have a single dtale server per kernel. This class uses the data_id
field in AutoplotDisplay, which is populated from the frontend. That tells
it which table is being shown to the user.
"""
def __init__(self):
        # This is the current state of the variables we track. The tracked dict holds the active variables;
        # ignored and force-shown variables are those the user wants to either ignore or stop ignoring.
self._tracked: Dict[str, VarData] = {}
self._ignored: Set[str] = set()
self._frozen = False
        # These variables are set using magic commands and inform us of the user's intentions before
        # update_variables is called.
self._ignore_next: List[str] = []
self._show_next: List[str] = []
# The following variables are refreshed every time update_variables is called. They represent the difference
# between what's changed in the cell and the current internal state.
self._new: List[VariableName] = []
self._force_show: List[VariableName] = []
self._updated: List[DataId] = []
self._deleted: List[DataId] = []
# This is a helper variable used to check whether it's the first time dtale.show is called. Due to race
# conditions in the package, we can't call show twice too fast.
self._first_show = True
def update_variables(self, pandas_vars: Dict[str, Union[pd.Series, pd.DataFrame]]) -> None:
# These variables are populated with the difference between the current state and the new variables
self._updated = []
self._new = []
self._deleted = []
self._force_show = []
# 1. The first step is to clean up our internal structures, so it matches what is in dtale and what is currently
# available in the namespace.
# 1.a. Remove and ignore variables deleted in dtale
removed = _removed_in_dtale(self._tracked.items())
for name in removed:
self._remove_tracked_var(name)
self._ignored.add(name)
# 1.b. Clean up all variables that are not in the namespace anymore. If they were deleted in dtale and from
# the namespace, completely forget about the name, rather than keeping it in the ignore list.
for name in (self._ignored | set(self._tracked.keys())) - set(pandas_vars.keys()):
self._remove_tracked_var(name)
if name in self._ignored:
self._ignored.remove(name)
# 2. Ignore all new variables when frozen. Add to ignored all variables the user wants to ignore, but remove all
# variables they want to show.
if self._frozen:
self._ignored |= set(pandas_vars.keys()) - set(self._tracked.keys())
# TODO: Currently, we do not know the order variables are shown/ignored, so show will have precedence over
# ignore if they are both executed in the same cell.
self._ignored |= set(self._ignore_next)
self._ignored -= set(self._show_next)
self._force_show = self._show_next
# 2.a. clean up user intentions now that we've consumed them.
self._ignore_next, self._show_next = [], []
# 3. Now that we know which variables to ignore, make sure they are all removed from the tracked dict as needed.
for name in self._ignored:
self._remove_tracked_var(name)
# 4. Start tracking variables which are not ignored
for name in set(pandas_vars.keys()) - (set(self._tracked.keys()) | self._ignored):
self._add_tracked_var(name, pandas_vars[name])
# 5. Update tracked variables which have changed
updated_variables = _filter_updated(pandas_vars.items(), self._tracked.copy())
for name, var in updated_variables.items():
self._update_tracked_var(name, var)
def draw(self, force: bool, output: AutoplotDisplay) -> None:
refresh = False
current = dtale.get_instance(output.data_id)
if current is None:
current = Image(data=resource_filename(__name__, "assets/imgs/dtale.png"))
# The conditionals below encode precedence. Whatever the user wants to show is the preferred value to
# display, followed by new values and so on.
if set(self._force_show) & set(self._tracked.keys()):
for key in reversed(self._force_show):
if key in self._tracked:
current = self._tracked[key].dd
refresh = True
break
elif set(self._new) & set(self._tracked.keys()):
for key in reversed(self._new):
if key in self._tracked:
current = self._tracked[key].dd
refresh = True
break
elif self._updated:
if output.data_id in self._updated:
refresh = True
elif self._deleted:
if output.data_id in self._deleted:
# We don't know what the user is seeing, so we have to switch to an arbitrary dataframe, in case
# they are seeing the deleted one
current = _next_dtale_data()
refresh = True
if refresh or force:
with output:
clear_output()
display(current)
def freeze(self, toast: Toast) -> None:
if not self._frozen:
toast.show(
"Dtale is 'frozen'. New DFs will not be tracked, but tracked ones will still update.", ToastType.info
)
self._frozen = True
def defrost(self, toast: Toast) -> None:
if self._frozen:
toast.show(
"Dtale is 'defrosted'. DFs defined while it was frozen will not be automatically picked up. Use --show "
"to get them added.",
ToastType.info,
)
self._frozen = False
def ignore_variable(self, toast: Toast, var_name: str) -> None:
self._ignore_next.append(var_name)
def show_variable(self, toast: Toast, var_name: str) -> None:
self._show_next.append(var_name)
def _dtale_show(self, *args, **kwargs) -> dtale.views.DtaleData:
result = dtale.show(*args, **kwargs)
if self._first_show:
# when running show for the first time, if that happens in rapid succession, it can cause race conditions
# internal to dtale
time.sleep(0.3)
self._first_show = False
return result
def _remove_tracked_var(self, var_name: str):
vardata = self._tracked.pop(var_name, None)
if vardata:
data_id = vardata.dd._data_id
dtale.global_state.cleanup(data_id)
self._deleted.append(data_id)
def _add_tracked_var(self, name, var):
dd = self._dtale_show(var, ignore_duplicate=True, reaper_on=False, name=name, hide_shutdown=True)
self._tracked[name] = VarData(pdf=var, dd=dd)
self._new.append(name)
def _update_tracked_var(self, name, var):
vardata = self._tracked[name]
vardata.dd.data = var
self._tracked[name] = VarData(pdf=var, dd=vardata.dd)
self._updated.append(vardata.dd._data_id)
def _removed_in_dtale(tracked: Iterable) -> Set[str]:
removed: Set[str] = set()
for name, vardata in tracked:
if dtale.global_state.find_data_id(vardata.dd._data_id) is None:
removed.add(name)
return removed
def _filter_updated(pandas_vars: Iterable, tracked: Dict[str, VarData]) -> Dict[str, Union[pd.Series, pd.DataFrame]]:
result: Dict[str, Union[pd.Series, pd.DataFrame]] = {}
for name, var in pandas_vars:
vardata = tracked.get(name)
if vardata is not None and vardata.pdf is not var:
result[name] = var
return result
def _next_dtale_data():
data_id = next(iter(dtale.global_state.get_data().keys()), None)
if data_id is not None:
return dtale.get_instance(data_id)
else:
return Image(data=resource_filename(__name__, "assets/imgs/dtale.png"))
|
186004
|
from typing import Callable
import pytest
from tests.taxonomy.conftest import TestDirectory, validate_taxonomy
@pytest.mark.parametrize("defect", [(1, 1), (2, 56), (3, 22), (4, 29), (5, 42)])
def test_zsh(defect, defect_path: Callable[[int, int], TestDirectory], gitenv):
index, case = defect
test_dir = defect_path(index, case)
validate_taxonomy(test_dir, index, case)
|
186101
|
import pickle
from utils import *
def visualize(_id, predict, label, masks, words, sent_attns=[1], msg=False, save_html=False, save_img=True):
h5_string_list = list()
h5_string_list.append('<div class="cam">')
h5_string_list.append("<head><meta charset='utf-8'></head>")
h5_string_list.append("Change Id: {}<br>".format(_id))
h5_string_list.append("Change Label: {}, Predict: {}<br>".format(label, predict))
save_path = '/data2/cg/DeepJIT/cam/code/'
if msg:
masks = [masks]
words = [words]
save_path = '/data2/cg/DeepJIT/cam/msg/'
for line_mask, line_word, sent_attn in zip(masks, words, sent_attns):
h5_string_list.append(
'<font style="background: rgba(0, 0, 255, %f)">     </font>' % sent_attn)
for mask, word in zip(line_mask, line_word):
h5_string_list.append('<font style="background: rgba(255, 0, 0, %f)">%s </font>' % (mask, word))
h5_string_list.append('<br>')
h5_string_list.append('</div>')
h5_string = ''.join(h5_string_list)
h5_path = os.path.join(save_path, "{}.html".format(_id))
with open(h5_path, "w") as h5_file:
h5_file.write(h5_string)
if save_img:
options = webdriver.ChromeOptions()
options.add_argument('--headless')
ob=Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome(options=options)
url = "file:///{}".format(h5_path)
# print(url)
driver.get(url)
element=driver.find_element_by_class_name('cam')
img_url=ob.get_element(driver, element, save_location=save_path)
img_path = os.path.join(save_path, "{}.png".format(_id))
os.system('mv {} {}'.format(img_url, img_path))
driver.close()
driver.quit()
if not save_html:
os.system('rm {}'.format(h5_path))
# project = 'qt'
project = 'openstack'
data = pickle.load(open('cam/{}_msg_cam.pkl'.format(project), 'rb'))
all_ids, all_msg, all_msg_mask, all_predict, all_label = data
dictionary = pickle.load(open('{}/{}_dict.pkl'.format(project, project), 'rb'))
dict_msg, dict_code = dictionary
id2msg_world = get_world_dict(dict_msg)
for _id, msg, masks, pred, label in zip(all_ids, all_msg, all_msg_mask, all_predict, all_label):
# plt.figure(figsize=(100,10))
msg = mapping_dict_world(msg, id2msg_world)
if 'task-number' in msg or 'fix' in msg or 'bug' in msg or 'failures' in msg \
or 'resolves' in msg or 'fail' in msg or 'bugs' in msg:
if label == 1 and pred > 0.5:
visualize(_id, pred, label, masks, msg, msg=True)
|
186123
|
from math import hypot
import pygame
import rabbyt
rabbyt.init_display((640,480))
rabbyt.set_viewport((640,480), projection=(0,0,640,480))
control_points = [rabbyt.Sprite(xy=xy) for xy in
[(100,100),(200,50),(300,150),(400,100)]]
grabbed_point = None
path_followers = []
def generate_followers():
global path_followers
p = [c.xy for c in control_points]
path_followers = [rabbyt.Sprite(xy=rabbyt.bezier3(p[0], p[1], p[2], p[3],
i*200, i*200 + 2000, extend="repeat")) for i in range(10)]
generate_followers()
print("Click and drag the control points.")
clock = pygame.time.Clock()
running=True
while running:
clock.tick(40)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running=False
elif event.type == pygame.KEYDOWN:
if event.key in (pygame.K_ESCAPE, pygame.K_q):
running=False
elif event.type == pygame.MOUSEBUTTONDOWN:
for c in control_points:
if hypot(c.x-event.pos[0], c.y-event.pos[1]) < 20:
grabbed_point = c
break
elif event.type == pygame.MOUSEMOTION:
if grabbed_point:
grabbed_point.xy = event.pos
generate_followers()
elif event.type == pygame.MOUSEBUTTONUP:
grabbed_point = None
rabbyt.set_time(pygame.time.get_ticks())
rabbyt.clear()
rabbyt.render_unsorted(control_points)
rabbyt.render_unsorted(path_followers)
pygame.display.flip()
|
186136
|
from .__about__ import *
import logging
__all__ = ['__version__',
'__classification__', '__author__', '__url__', '__email__',
'__title__', '__summary__',
'__license__', '__copyright__']
# establish logging paradigm
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
|
186170
|
from collections import defaultdict
from pprint import pprint
from math import log
# Critical values (p = 0.05) for n degrees of freedom.
# These values are usable for both the chi^2 and G tests
_ptable = {
1:3.841,
2:5.991,
3:7.815,
4:9.488,
5:11.071,
6:12.592,
7:14.067,
8:15.507,
9:16.919,
10:18.307,
11:19.7,
12:21,
13:22.4,
14:23.7,
15:25,
16:26.3
}
_get_count = lambda k, d : d[k]['count'] if k in d else 0
def g_value(actual, expected):
# G = 2 * sum(Oi * ln(Oi/Ei))
answerKeys = set(list(actual.keys()) + list(expected.keys()))
degreesFreedom = len(answerKeys)
G = 0
for k in answerKeys:
E = _get_count(k, expected)
O = _get_count(k, actual)
if E == 0:
print(' Warning! Expected 0 counts of {}, but got {}'.format(k, O))
elif O == 0:
print(' Warning! Observed 0 counts of {}'.format(k))
else:
G += O * log(O/E)
G *= 2
return degreesFreedom, G
def chi_value(actual, expected):
answerKeys = set(list(actual.keys()) + list(expected.keys()))
degreesFreedom = len(answerKeys)
chiSquared = 0
for k in answerKeys:
E = _get_count(k, expected)
O = _get_count(k, actual)
if E == 0:
print(' Warning! Expected 0 counts of {}, but got {}'.format(k, O))
else:
chiSquared += (O - E) ** 2 / E
return degreesFreedom, chiSquared
def probability_difference(actual, expected):
actualC = 0
expectedC = 0
for k in set(list(actual.keys()) + list(expected.keys())):
expectedC += _get_count(k, expected)
actualC += _get_count(k, actual)
p = 0
for k in set(list(actual.keys()) + list(expected.keys())):
E = _get_count(k, expected)
O = _get_count(k, actual)
Ep = E / expectedC
Op = O / actualC
p += abs(Ep - Op)
p /= 2 # P is between 0 and 2 -> P is between 0 and 1
return p
def dist_test(actual, expected, calculation):
df, p = calculation(actual, expected)
if df not in _ptable:
raise Exception(('{} degrees of freedom does not have a corresponding chi squared value.' +
' Please look up the value and add it to the table in copycat/statistics.py').format(df))
return (p < _ptable[df])
def cross_formula_table(actualDict, expectedDict, calculation, probs=False):
data = dict()
for ka, actual in actualDict.items():
for ke, expected in expectedDict.items():
if probs:
data[(ka, ke)] = probability_difference(actual, expected)
else:
data[(ka, ke)] = dist_test(actual, expected, calculation)
return data
def cross_table(problemSets, calculation=g_value, probs=False):
table = defaultdict(dict)
for i, (a, problemSetA) in enumerate(problemSets):
for b, problemSetB in problemSets[i + 1:]:
for problemA in problemSetA:
for problemB in problemSetB:
if (problemA.initial == problemB.initial and
problemA.modified == problemB.modified and
problemA.target == problemB.target):
answersA = problemA.distributions
answersB = problemB.distributions
table[(problemA.initial,
problemA.modified,
problemA.target)][(a, b)] = (
cross_formula_table(
answersA, answersB, calculation, probs))
return table
def iso_chi_squared(actualDict, expectedDict):
for key in expectedDict.keys():
assert key in actualDict, 'The key {} was not tested'.format(key)
actual = actualDict[key]
expected = expectedDict[key]
if not dist_test(actual, expected, g_value):
raise Exception('Value of G higher than expected')
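# --- Hedged usage sketch (not part of the original file) ---
# Both tests expect nested count dicts shaped like {'answer': {'count': n}}:
#
# observed = {'a': {'count': 48}, 'b': {'count': 52}}
# expected = {'a': {'count': 50}, 'b': {'count': 50}}
# dist_test(observed, expected, g_value)      # True: G ~= 0.16 < 5.991 (2 df)
# probability_difference(observed, expected)  # 0.02, the total variation distance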
|
186196
|
import numpy as np
import os
import pygame, sys
from pygame.locals import *
sys.path.insert(0, os.getcwd())
import rodentia
BLACK = (0, 0, 0)
MAX_STEP_NUM = 60 * 30
class Display(object):
def __init__(self, display_size):
self.width = 640
self.height = 480
self.data_path = os.path.dirname(
os.path.abspath(__file__)) + "/../../examples/data/"
self.env = rodentia.Environment(
width=self.width, height=self.height, bg_color=[0.1, 0.1, 0.1])
self.prepare_stage()
#self.prepare_maze_stage()
self.obj_ids_set = set()
self.reset()
pygame.init()
self.surface = pygame.display.set_mode(display_size, 0, 24)
pygame.display.set_caption('rodentia')
def prepare_stage(self):
floor_texture_path = self.data_path + "floor3.png"
# Floor
self.env.add_box(
texture_path=floor_texture_path,
half_extent=[20.0, 1.0, 20.0],
pos=[0.0, -1.0, 0.0],
rot=0.0,
detect_collision=False)
wall_texture_path = self.data_path + "wall2.png"
# -Z
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[20.0, 1.0, 1.0],
pos=[0.0, 1.0, -20.0],
rot=0.0,
detect_collision=False)
# +Z
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[20.0, 1.0, 1.0],
pos=[0.0, 1.0, 20.0],
rot=0.0,
detect_collision=False)
# -X
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, 20.0],
pos=[-20.0, 1.0, 0.0],
rot=0.0,
detect_collision=False)
# +X
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, 20.0],
pos=[20.0, 1.0, 0.0],
rot=0.0,
detect_collision=False)
# Debug box
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, 1.0],
pos=[0.0, 1.0, -5.0],
rot=0,
detect_collision=False)
def prepare_maze_stage(self):
floor_texture_path = self.data_path + "floor3.png"
# Floor
self.env.add_box(
texture_path=floor_texture_path,
half_extent=[20.0, 1.0, 20.0],
pos=[0.0, -1.0, 0.0],
rot=0.0,
detect_collision=False)
wall_texture_path = self.data_path + "wall0.png"
wall_thickness = 0.1
# [Center room]
# -Z
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[0.0, 1.0, -4.0],
rot=0.0,
detect_collision=False)
# +X
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 3.0],
pos=[1.0, 1.0, -1.0],
rot=0.0,
detect_collision=False)
# +Z
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[-1.0, 1.0, 2.0],
rot=0.0,
detect_collision=False)
# -X
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 1.0],
pos=[-3.0, 1.0, 1.0],
rot=0.0,
detect_collision=False)
# -X
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 1.0],
pos=[-3.0, 1.0, -3.0],
rot=0.0,
detect_collision=False)
# [Outer wall]
# Left (-X) wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 10.0],
pos=[-5.0, 1.0, 0.0],
rot=0.0,
detect_collision=False)
# Right (+X) wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 10.0],
pos=[5.0, 1.0, 0.0],
rot=0.0,
detect_collision=False)
# -Z wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[5.0, 1.0, wall_thickness],
pos=[0.0, 1.0, -10.0],
rot=0.0,
detect_collision=False)
# +Z wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[5.0, 1.0, wall_thickness],
pos=[0.0, 1.0, 10.0],
rot=0.0,
detect_collision=False)
# [-Z L]
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[-1.0, 1.0, -6.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 2.0],
pos=[-3.0, 1.0, -8.0],
rot=0.0,
detect_collision=False)
# [-Z 7]
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[1.0, 1.0, -8.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 3.0],
pos=[3.0, 1.0, -5.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[2.0, 1.0, -2.0],
rot=0.0,
detect_collision=False)
# Large panel beside the goal
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[3.0, 1.0, 0.0],
rot=0.0,
detect_collision=False)
# Small panel beside the goal
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[2.0, 1.0, 2.0],
rot=0.0,
detect_collision=False)
# Chair-shaped wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[2.0, 1.0, 4.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 1.0],
pos=[3.0, 1.0, 5.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[4.0, 1.0, 6.0],
rot=0.0,
detect_collision=False)
# Chair-shaped wall with long legs
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[-1.0, 1.0, 6.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 1.0],
pos=[1.0, 1.0, 7.0],
rot=0.0,
detect_collision=False)
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[2.0, 1.0, 8.0],
rot=0.0,
detect_collision=False)
# Single straight wall
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[wall_thickness, 1.0, 2.0],
pos=[-1.0, 1.0, 4.0],
rot=0.0,
detect_collision=False)
# Lower panel 1
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[1.0, 1.0, wall_thickness],
pos=[-4.0, 1.0, 4.0],
rot=0.0,
detect_collision=False)
# Lower panel 2
self.env.add_box(
texture_path=wall_texture_path,
half_extent=[2.0, 1.0, wall_thickness],
pos=[-3.0, 1.0, 8.0],
rot=0.0,
detect_collision=False)
def update(self):
self.surface.fill(BLACK)
self.process()
pygame.display.update()
def get_action(self):
lookAction = 0
strafeAction = 0
moveAction = 0
pressed = pygame.key.get_pressed()
if pressed[K_q]:
lookAction += 6
if pressed[K_e]:
lookAction -= 6
if pressed[K_a]:
strafeAction += 1
if pressed[K_d]:
strafeAction -= 1
if pressed[K_w]:
moveAction += 1
if pressed[K_s]:
moveAction -= 1
return np.array([lookAction, strafeAction, moveAction], dtype=np.int32)
def process_sub(self, action):
obs = self.env.step(action=action)
self.step_num += 1
screen = obs["screen"]
collided = obs["collided"]
reward = 0
#if len(collided) != 0:
# for id in collided:
# reward += 1
# self.env.remove_obj(id)
self.total_reward += reward
terminal = self.total_reward >= 2 or self.step_num >= MAX_STEP_NUM
return screen, reward, terminal
def process(self):
action = self.get_action()
screen, reward, terminal = self.process_sub(action)
image = pygame.image.frombuffer(screen, (self.width, self.height),
'RGB')
self.surface.blit(image, (0, 0))
if terminal:
self.reset()
def clear_objects(self):
for id in self.obj_ids_set:
self.env.remove_obj(id)
self.obj_ids_set = set()
def reset(self):
# Clear remaining reward objects
self.clear_objects()
texture_path = self.data_path + "red.png"
# Reward Sphere
obj_id0 = self.env.add_sphere(
texture_path=texture_path,
radius=1.0,
pos=[-5.0, 1.0, 5.0],
rot=0.0,
mass=1.0,
detect_collision=True)
obj_id1 = self.env.add_sphere(
texture_path=texture_path,
radius=1.0,
pos=[5.0, 1.0, 5.0],
rot=0.0,
mass=1.0,
detect_collision=True)
self.obj_ids_set.add(obj_id0)
self.obj_ids_set.add(obj_id1)
# add test model
model_path0 = self.data_path + "apple0.obj"
self.env.add_model(
path=model_path0,
scale=[1.0, 1.0, 1.0],
pos=[0.0, 0.0, 10.0], # +z pos
rot=0.0,
mass=1.0,
detect_collision=True)
model_path1 = self.data_path + "lemon0.obj"
self.env.add_model(
path=model_path1,
scale=[1.0, 1.0, 1.0],
pos=[10.0, 0.0, 10.0],
rot=0.0,
mass=1.0,
detect_collision=True)
model_path2 = self.data_path + "ramp0.obj"
self.env.add_model(
path=model_path2,
scale=[2.0, 1.0, 2.0],
pos=[10.0, 0.0, 5.0],
rot=np.pi * 0.25,
mass=0.0,
detect_collision=False,
use_mesh_collision=True)
model_path3 = self.data_path + "cylinder0.obj"
self.env.add_model(
path=model_path3,
scale=[3.0, 3.0, 3.0],
pos=[-5.0, 0.0, 8.0],
rot=0.0,
mass=0.0,
detect_collision=False,
use_mesh_collision=True)
# Locate agent to default position
self.env.locate_agent(pos=[0, 0, 0], rot_y=0.0)
# Set light params
self.env.set_light(dir=[-0.5, -1.0, -0.4])
self.total_reward = 0
self.step_num = 0
def main():
display_size = (640, 480)
display = Display(display_size)
clock = pygame.time.Clock()
running = True
FPS = 60
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
running = False
display.update()
clock.tick(FPS)
if __name__ == '__main__':
main()
|
186219
|
from petisco.base.domain.message.message import Message
class RabbitMqMessageQueueNameFormatter:
@staticmethod
def format(message: Message, exchange_name: str = None) -> str:
message_name = message.name.replace(".", "_")
message_type = message.type if message.type != "domain_event" else "event"
message_format = f"{message.version}.{message_type}.{message_name}"
return f"{exchange_name}.{message_format}" if exchange_name else message_format
@staticmethod
def format_retry(message: Message, exchange_name: str = None) -> str:
queue_name = RabbitMqMessageQueueNameFormatter.format(message, exchange_name)
return f"retry.{queue_name}"
@staticmethod
def format_dead_letter(message: Message, exchange_name: str = None) -> str:
queue_name = RabbitMqMessageQueueNameFormatter.format(message, exchange_name)
return f"dead_letter.{queue_name}"
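# --- Hedged usage sketch (not part of the original file) ---
# Assuming a Message with name="user.created", version=1 and type="domain_event",
# the formatters would produce:
#   format(message, "acme")             -> "acme.1.event.user_created"
#   format_retry(message, "acme")       -> "retry.acme.1.event.user_created"
#   format_dead_letter(message, "acme") -> "dead_letter.acme.1.event.user_created"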
|
186234
|
import numpy as np
import paddle
from paddlevision.models.alexnet import alexnet
from reprod_log import ReprodLogger
if __name__ == "__main__":
paddle.set_device("cpu")
# load model
# the model is saved into ~/.cache/torch/hub/checkpoints/alexnet-owt-4df8aa71.pth
# define logger
reprod_logger = ReprodLogger()
model = alexnet(
pretrained="../../weights/alexnet_paddle.pdparams", num_classes=1000)
model.eval()
# read or gen fake data
fake_data = np.load("../../fake_data/fake_data.npy")
fake_data = paddle.to_tensor(fake_data)
# forward
out = model(fake_data)
#
reprod_logger.add("logits", out.cpu().detach().numpy())
reprod_logger.save("forward_paddle.npy")
|
186290
|
from bokeh.plotting import figure, output_file, show
output_file("styling_toolbar_autohide.html")
# Basic plot setup
plot = figure(width=400, height=400, title='Toolbar Autohide')
plot.line([1,2,3,4,5], [2,5,8,2,7])
# Set autohide to true to only show the toolbar when mouse is over plot
plot.toolbar.autohide = True
show(plot)
|
186303
|
import matplotlib
matplotlib.use('agg')  # select the non-interactive Agg backend before importing pyplot
import matplotlib.pyplot as plt
import random
import os
import numpy as np
import tensorflow as tf
import logging
from load_data import load_train_data, load_data_by_id, load_entity_by_id, \
load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, \
get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary
logger = logging.getLogger(__name__)
local_file = os.path.split(__file__)[-1]
logging.basicConfig(
format='%(asctime)s : %(filename)s : %(funcName)s : %(levelname)s : %(message)s',
level=logging.INFO)
def set_random_seed(seed_value):
random.seed(seed_value)
np.random.seed(seed_value)
tf.random.set_seed(seed_value)
def sample(sample_list):
return random.sample(sample_list, 1)[0]
def negative_sample(pos_id_list, id_list, sample_num):
neg_result = []
cnt = 0
while cnt < sample_num:
negative_id = ''
while negative_id == '' or negative_id in pos_id_list:
negative_id = sample(id_list)
neg_result.append(negative_id)
cnt += 1
return neg_result
def gen_valid_data(data_path, word_dict, char_dict, sentence_length, can_path, prior_path, entity_embedding_path, character_length,
topk=25, alpha=0.0, voting_k=10, context_path=None, context_max_len=100):
all_data, raw_data = load_data_by_id(data_path, word_dict, char_dict, sentence_length, character_length)
candidate_dict, raw_can_dict, can_char_dict = load_candidates_by_id(word_dict, char_dict, sentence_length, can_path, character_length, topk, alpha=alpha)
mention_entity_prior = load_mention_entity_prior2(prior_path)
#coherence
entity_embedding, default_embedding = load_entity_emb(entity_embedding_path)
voting_eids = load_voting_eid_by_doc(raw_data, can_path, voting_k)
voting_emb_dict = get_embedding_of_voting(voting_eids, entity_embedding, default_embedding)
# context
mention_context_dict = load_mention_context(context_path, context_max_len, word_dict)
mention_list, candidate_list, y_list, raw_can_name_list, x_char_list, y_char_list, can_prior = [], [], [], [], [], [], []
can_emb_list, voting_emb_list = [], []
context_list = []
miss_cnt = 0
test_data = list()
for index, (mention_id, labels, char_ids) in enumerate(all_data):
label = labels
raw = raw_data[index]
doc_id, raw_mention, raw_label = raw[0], raw[1], raw[2]
mention_name = raw_mention
can_list_of_the_mention = candidate_dict[mention_name]
raw_can_dict_of_the_mention = raw_can_dict[mention_name]
for can_label, can_id in can_list_of_the_mention:
mention_list.append(mention_id)
candidate_list.append(can_id)
y_list.append(can_label)
entity_name = raw_can_dict_of_the_mention[can_label]
raw_can_name_list.append(entity_name)
#char
x_char_list.append(char_ids)
can_char_ids = can_char_dict[entity_name]
y_char_list.append(can_char_ids)
#prior
prior_value = 0
if mention_name in mention_entity_prior and can_label in mention_entity_prior[mention_name]:
prior_value = mention_entity_prior[mention_name][can_label]
can_prior.append([prior_value])
#coherence
can_emb = default_embedding
if can_label in entity_embedding:
can_emb = entity_embedding[can_label]
voting_emb = voting_emb_dict[doc_id][raw_mention]
can_emb_list.append(can_emb)
voting_emb_list.append(voting_emb)
# context
doc_id = raw_data[index][0]
mention_context = [0] * context_max_len
if doc_id in mention_context_dict and mention_name in mention_context_dict[doc_id]:
mention_context = mention_context_dict[doc_id][mention_name]
context_list.append(mention_context)
data = ({'mention': np.array(mention_list), 'candidate': np.array(candidate_list),
'entity_name': raw_can_name_list, 'men_char': np.array(x_char_list),
'can_char': np.array(y_char_list), 'can_prior':np.array(can_prior),
'candidate_emb':np.array(can_emb_list), 'voting_candidates_emb':np.array(voting_emb_list), 'can_context':np.array(context_list)
}, np.array(y_list), (label, doc_id, raw_mention))
test_data.append(data)
mention_list, candidate_list, y_list, raw_can_name_list, x_char_list, y_char_list, can_prior = [], [], [], [], [], [], []
can_emb_list, voting_emb_list = [], []
context_list = []
logging.info('test data size = {a}, miss data size = {b}'.format(a=len(all_data), b=miss_cnt))
return test_data
def gen_train_data(data_path, word_dict, char_dict, entity_path, batch_size, sentence_length, character_length, can_path, prior_path, entity_embedding_path,
topk=10, alpha=0.0, voting_k=10, context_path=None, context_max_len=100):
all_data, raw_data = load_data_by_id(data_path, word_dict, char_dict, sentence_length, character_length, mode='train')
all_entity, entity_dict, _ = load_entity_by_id(entity_path, word_dict, char_dict, sentence_length, character_length)
candidate_dict, raw_can_dict, can_char_dict = load_candidates_by_id(word_dict, char_dict, sentence_length, can_path, character_length, topk=topk, alpha=alpha)
# coherence
entity_embedding, default_embedding = load_entity_emb(entity_embedding_path)
voting_eids = load_voting_eid_by_doc(raw_data, can_path, voting_k)
voting_emb_dict = get_embedding_of_voting(voting_eids, entity_embedding, default_embedding)
#context
mention_context_dict = load_mention_context(context_path, context_max_len, word_dict)
mention_entity_prior = load_mention_entity_prior2(prior_path)
mention_list, candidate_list, neg_candidate_list, x_char_list, y_char_list, z_char_list, pos_candidate_prior, neg_candidate_prior \
= [], [], [], [], [], [], [], []
pos_can_emb_list, neg_can_emb_list, voting_emb_list = [], [], []
context_list = []
while True:
for index, (mention_id, label, char_ids) in enumerate(all_data):
if label not in entity_dict:
pos_sample, pos_y_chars = mention_id, char_ids
else:
synonyms = entity_dict[label]
pos_sample, pos_y_chars = sample(synonyms)
# use candidates to train
mention = raw_data[index][1]
candidates_of_this_mention = candidate_dict[mention]
can_labels = [e1 for (e1, e2) in candidates_of_this_mention]
if len(can_labels) == 1:
neg_labels = negative_sample(label, list(all_entity), 1)[0]
else:
neg_labels = negative_sample(label, can_labels, 1)[0]
neg_synonyms = entity_dict[neg_labels]
neg_sample, neg_y_chars = sample(neg_synonyms)
mention_list.append(mention_id)
candidate_list.append(pos_sample)
x_char_list.append(char_ids)
y_char_list.append(pos_y_chars)
neg_candidate_list.append(neg_sample)
z_char_list.append(neg_y_chars)
pos_prior_value, neg_prior_value = 0, 0
if mention in mention_entity_prior and label in mention_entity_prior[mention]:
pos_prior_value = mention_entity_prior[mention][label]
if mention in mention_entity_prior and neg_labels in mention_entity_prior[mention]:
neg_prior_value = mention_entity_prior[mention][neg_labels]
pos_candidate_prior.append([pos_prior_value])
neg_candidate_prior.append([neg_prior_value])
#coherence
doc_id = raw_data[index][0]
pos_can_emb, neg_can_emb = default_embedding, default_embedding
if label in entity_embedding:
pos_can_emb = entity_embedding[label]
if neg_labels in entity_embedding:
neg_can_emb = entity_embedding[neg_labels]
voting_emb = voting_emb_dict[doc_id][mention]
pos_can_emb_list.append(pos_can_emb)
neg_can_emb_list.append(neg_can_emb)
voting_emb_list.append(voting_emb)
#context
doc_id = raw_data[index][0]
if doc_id in mention_context_dict and mention in mention_context_dict[doc_id]:
mention_context = mention_context_dict[doc_id][mention]
else:
mention_context = [0] * context_max_len
context_list.append(mention_context)
if len(mention_list) % batch_size == 0:
yield {'mention': np.array(mention_list), 'pos_candidate':np.array(candidate_list),
'men_char':np.array(x_char_list), 'pos_can_char':np.array(y_char_list)
,'neg_candidate':np.array(neg_candidate_list), 'neg_can_char':np.array(z_char_list),
'pos_can_prior':np.array(pos_candidate_prior), 'neg_can_prior':np.array(neg_candidate_prior)
,'pos_candidate_emb':np.array(pos_can_emb_list), 'neg_candidate_emb':np.array(neg_can_emb_list),
'voting_candidates_emb':np.array(voting_emb_list), 'can_context':np.array(context_list)}, \
np.array(mention_list),
mention_list, candidate_list, neg_candidate_list, x_char_list, y_char_list, z_char_list, pos_candidate_prior, neg_candidate_prior \
= [], [], [], [], [], [], [], []
pos_can_emb_list, neg_can_emb_list, voting_emb_list = [], [], []
context_list = []
def make_loss_picture(history):
print('Plot validation accuracy and loss...')
# acc=history.history['acc']
# val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
# plt.plot(acc, label='acc')
# plt.plot(val_acc, label='val_acc')
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'valid'], loc='upper left')
# plt.savefig('../checkpoints/acc.png')
# plt.close()
plt.plot(loss, label='loss')
plt.plot(val_loss, label='val_loss')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.savefig('../checkpoints/loss.png')
if __name__ == '__main__':
can_path = '../output/adr/candidates/training_aligned_cos_with_mention_candidate.txt'
context_path = '../output/adr/context/train_mention_context.txt'
word_vocab_path = '../output/adr/word_vocabulary.dict'
char_vocab_path = '../output/adr/char_vocabulary.dict'
prior_path = '../output/adr/mention_entity_prior.txt'
word_dict, word_list = load_word_vocabulary(word_vocab_path, True)
char_dict, char_list = load_char_vocabulary(char_vocab_path)
data = gen_train_data(data_path='../output/adr/train_data.txt', word_dict=word_dict, char_dict=char_dict,
entity_path='../output/adr/entity_kb.txt', batch_size=6,topk=20, alpha=0.0,
sentence_length=20, character_length=25, can_path=can_path,
prior_path=prior_path,
entity_embedding_path='../output/adr/embed/entity_emb_50.txt',
context_path=context_path)
cnt = 0
for train, y in data:
cnt += 1
mention, pos_candidate, neg_candidate = train['mention'], train['pos_candidate'], train['neg_candidate']
men_char, pos_can_char, neg_can_char = train['men_char'], train['pos_can_char'], train['neg_can_char']
voting = train['can_context']
print(mention)
if len(np.shape(mention)) != 2:
print(mention)
raise Exception('error')
|
186353
|
from typing import Tuple
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def fit_to_shape(array: xpArray, shape: Tuple[int, ...]) -> xpArray:
"""
Pads or crops an array to attain a certain shape
Parameters
----------
array : array to pad or crop
shape : shape to attain by cropping or padding each dimension
Returns
-------
Array of requested shape
"""
length_diff = tuple(u - v for u, v in zip(shape, array.shape))
if any(x < 0 for x in length_diff):
# we need to crop at least one dimension:
slicing = tuple(slice(0, s) for s in shape)
array = array[slicing]
# Independently of whether we had to crop a dimension, we proceed with eventual padding:
length_diff = tuple(u - v for u, v in zip(shape, array.shape))
if any(x > 0 for x in length_diff):
xp = Backend.get_xp_module()
pad_width = tuple(tuple((0, d)) for d in length_diff)
array = xp.pad(array, pad_width=pad_width)
return array
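# --- Hedged usage sketch (not part of the original file) ---
# Assuming a NumPy-backed dexp Backend is active, a (4, 4) array fitted to
# (2, 6) is cropped along axis 0 and zero-padded at the end of axis 1:
#
# import numpy as np
# a = np.ones((4, 4))
# fit_to_shape(a, (2, 6)).shape  # -> (2, 6)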
|
186388
|
"""
This file does the analysis of true positives with no prior common neighbours, as reported in Section 6.4 of the paper.
"""
import csv
def get_positives(input_data_file = 'test.tsv'):
with open(input_data_file) as tsv:
positives = []
for ind, line in enumerate(csv.reader(tsv, delimiter="\t")): # quoting=csv.QUOTE_NONE may be required for some data; examine the data if so
if line[2] == 'I-LINK':
entity1 = line[0].replace(' ', '_')
entity2 = line[1].replace(' ', '_')
positives.append("{}::{}".format(entity1, entity2))
return positives
def get_graph(vertices_data_file = 'vertices.txt', graph_data_file = 'graph.adjlist'):
#Read in vertices and node indexes
vertices = {}
vertices_file = open(vertices_data_file, 'r')
for line in vertices_file:
line = line.split()
vertices[line[0]] = line[1]
#Read in graph adjacency list
graph = {}
graph_file = open(graph_data_file, 'r')
for line in graph_file:
line = line.split()
if line[0] in vertices:
node = vertices[line[0]]
graph[node] = [vertices[node_] for node_ in line[1:]]
else:
print("Node index: %s not in vertices." % line[0])
return graph
def calc_recall_limit(graph, test_positives, bipartite, cn_threshold=0):
limited = 0
for edge in test_positives:
entity1 = edge.split('::')[0]
entity2 = edge.split('::')[1]
cn = get_common_neighbours(graph, entity1, entity2, bipartite)
if int(cn) <= int(cn_threshold):
limited += 1
print("{}/{} positives with at most {} common neighbours.".format(limited, len(test_positives), cn_threshold))
return limited/float(len(test_positives))
def get_common_neighbours(graph, entity1, entity2, bipartite):
entity1_set = set(graph[entity1])
entity2_set = set(graph[entity2])
if bipartite:
#Get neighbours of neighbours of this node to use
entity2_lst = []
for node in entity2_set:
entity2_lst += graph[node]
entity2_set = set(entity2_lst)
cn = len(entity2_set.intersection(entity1_set))
return cn
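# --- Hedged example (not part of the original file) ---
# For graph = {'a': ['x'], 'b': ['x'], 'x': ['a', 'b']}:
#   get_common_neighbours(graph, 'a', 'b', bipartite=False) -> 1 (the shared node 'x')
#   get_common_neighbours(graph, 'a', 'b', bipartite=True)  -> 0, because b's
#     neighbourhood is first expanded to its neighbours-of-neighbours {'a', 'b'},
#     which does not intersect a's neighbours {'x'}.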
def get_recall_limited_edges(graph, test_positives, bipartite, cn_threshold=0):
limited = []
for edge in test_positives:
entity1 = edge.split('::')[0]
entity2 = edge.split('::')[1]
cn = get_common_neighbours(graph, entity1, entity2, bipartite)
if int(cn) <= int(cn_threshold):
limited.append(edge)
return limited
def get_overall_rankings(edge_lst, ranked_filename):
print("Analysing: {}".format(ranked_filename))
with open(ranked_filename) as tsv:
ranked_edges = {}
for ind, line in enumerate(csv.reader(tsv, delimiter="\t")):
if ind == 0:
continue
ranked_edges[line[0]] = ind
rankings = []
first_quartile = []
second_quartile = []
third_quartile = []
fourth_quartile = []
total_edges = len(ranked_edges)
quartile_size = total_edges / 4
for edge in edge_lst:
rank = ranked_edges[edge]
if rank < quartile_size:
quartile = 'First'
first_quartile.append(edge)
elif rank < (quartile_size * 2):
quartile = 'Second'
second_quartile.append(edge)
elif rank < (quartile_size * 3):
quartile = 'Third'
third_quartile.append(edge)
else:
quartile = 'Fourth'
fourth_quartile.append(edge)
rankings.append((edge, "{} ({})".format(quartile, rank)))
print("Total: {}".format(total_edges))
print("First: {}% ({}/{}). Second: {}% ({}/{}). Third: {}% ({}/{}). Fourth: {}% ({}/{}).".format((len(first_quartile)/float(len(edge_lst))) * 100, len(first_quartile), len(edge_lst),
(len(second_quartile)/float(len(edge_lst))) * 100, len(second_quartile), len(edge_lst),
(len(third_quartile)/float(len(edge_lst))) * 100, len(third_quartile), len(edge_lst),
(len(fourth_quartile)/float(len(edge_lst))) * 100, len(fourth_quartile), len(edge_lst)))
def main():
bipartite = False
cn_threshold = 0
# Get these logs by uncommenting the line in link_prediction.py that calls log_predictions()
folder = '/path/to/logs'
print("Getting positives...")
positives = get_positives('{}/test.tsv'.format(folder))
print("Reading graph...")
graph = get_graph('{}/vertices.txt'.format(folder), '{}/graph.adjlist'.format(folder))
print("Calculating recall limitedness...")
recall_limit = calc_recall_limit(graph, positives, bipartite, cn_threshold)
print("{}% of the positives had no common neighbours.".format(recall_limit * 100))
print("Calculating rankings...")
recall_limited_edges = get_recall_limited_edges(graph, positives, bipartite, cn_threshold)
get_overall_rankings(recall_limited_edges, '{}/ranked-edges-all-common_neighbours.tsv'.format(folder))
get_overall_rankings(recall_limited_edges, '{}/ranked-edges-all-deepwalk-concatenate.tsv'.format(folder))
if __name__ == '__main__':
main()
|
186399
|
from rlkit.samplers.util import rollout
from rlkit.samplers.rollout_functions import multitask_rollout
import numpy as np
class InPlacePathSampler(object):
"""
A sampler that does not use serialization for sampling. Instead, it just uses
the current policy and environment as-is.
WARNING: This will affect the environment! So
```
sampler = InPlacePathSampler(env, ...)
sampler.obtain_samples # this has side-effects: env will change!
```
"""
def __init__(self, env, policy, max_samples, max_path_length, randomize_env=False, alg=None):
self.env = env
self.policy = policy
self.max_path_length = max_path_length
self.max_samples = max_samples
assert max_samples >= max_path_length, "Need max_samples >= max_path_length"
self.randomize_env = randomize_env
self.alg = alg
def start_worker(self):
pass
def shutdown_worker(self):
pass
def obtain_samples(self, rollout_type="multitask"):
paths = []
n_steps_total = 0
while n_steps_total + self.max_path_length <= self.max_samples:
if self.randomize_env:
self.env, env_name = self.alg.get_new_env()
print(f"Evaluating {env_name}")
if rollout_type == "multitask":
path = multitask_rollout(
self.env,
self.policy,
max_path_length=self.max_path_length,
animated=False,
observation_key='observation',
desired_goal_key='desired_goal',
get_action_kwargs=dict(
return_stacked_softmax=False,
mask=np.ones((1, self.env.unwrapped.num_blocks)),
deterministic=True
)
)
else:
path = rollout(
self.env, self.policy, max_path_length=self.max_path_length
)
paths.append(path)
n_steps_total += len(path['observations'])
return paths
|
186442
|
import inspect
import numpy as np
from limix.core.type.exception import NotArrayConvertibleError
def my_name():
return inspect.stack()[1][3]
def assert_finite_array(*arrays):
for a in arrays:
if not np.isfinite(a).all():
raise ValueError("Array must not contain infs or NaNs")
def assert_make_float_array(arr, arg_name):
try:
arr = np.asarray(arr, dtype=float)
except ValueError:
raise NotArrayConvertibleError("%s has to be float-array "
"convertible." % arg_name)
return arr
def assert_type(arg, type_, param_name):
err_msg = ("Parameter %s is not of type %s.%s."
% (param_name, type_.__module__, type_.__name__))
if type(arg) is not type_:
raise TypeError(err_msg)
def assert_subtype(arg, type_, param_name):
err_msg = ("Parameter %s must have %s.%s inheritance."
% (param_name, type_.__module__, type_.__name__))
if not issubclass(type(arg), type_):
raise TypeError(err_msg)
def assert_type_or_list_type(arg, type_, param_name):
err_msg = ("Parameter %s is not of type "
"%s.%s nor a list or a tuple of the same."
% (param_name, type_.__module__, type_.__name__))
if type(arg) in (list, tuple):
for a in arg:
if type(a) is not type_:
raise TypeError(err_msg)
else:
if type(arg) is not type_:
raise TypeError(err_msg)
def assert_subtype_or_list_subtype(arg, type_, param_name):
err_msg = ("Parameter %s is not of type "
"%s.%s nor a list or a tuple of the same."
% (param_name, type_.__module__, type_.__name__))
if type(arg) in (list, tuple):
for a in arg:
if not issubclass(type(a), type_):
raise TypeError(err_msg)
else:
if not issubclass(type(arg), type_):
raise TypeError(err_msg)
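# --- Hedged usage sketch (not part of the original file) ---
# arr = assert_make_float_array([1, 2, 3], "x")  # -> array([1., 2., 3.])
# assert_finite_array(arr)                       # passes; raises on NaN/inf
# assert_type(3.0, float, "y")                   # passes; exact type match only
# assert_subtype(True, int, "z")                 # passes, since bool subclasses int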
|
186446
|
import urllib.request
import os
import argparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser()
parser.add_argument("url", type=str, nargs=1, help="Main url with list of recipe URLs")
parser.add_argument("cuisine", type=str, nargs=1, help="Type of cuisine on the main url page")
parser.add_argument("pageNum", type=int, nargs=1, help="Page number to pull from")
#parser.add_argument("fileStart", type=int, nargs=1, help="number to start filenames on")
args = parser.parse_args()
cuisine = str(args.cuisine[0]).lower()
page = str(args.pageNum[0])
main_url = str(args.url[0]) + "?sort=Newest&page=" + page
#local_filename, headers = urllib.request.urlretrieve(main_url)
try:
local_filename, headers = urllib.request.urlretrieve(main_url)
except Exception:
print("\n### Unable to open webpage " + main_url + " ### \n")
exit(-1)
url_file = open(local_filename)
html = url_file.read()
soup = BeautifulSoup(html, 'html.parser')
div = soup.find_all('article', class_='grid-col--fixed-tiles')
url_list = []
for item in div:
for a in item.find_all('a', href=True):
if "/recipe" in a['href']:
if a['href'] not in url_list:
url_list.append(a['href'])
url_file.close()
filenum = len(os.listdir("html/" + cuisine))
for url in url_list:
if filenum > 160:
break
urlname = "http://allrecipes.com" + url
html_filename = "html/" + cuisine +"/" + cuisine + str(filenum) + ".html"
html_file = open(html_filename, 'w')
print(urlname, filenum)
try:
local_filename, headers = urllib.request.urlretrieve(urlname)
except Exception:
print("UNABLE TO OPEN " + urlname)
exit(-1)
file_ = open(local_filename)
data = file_.read()
html_file.write(data)
html_file.close()
file_.close()
filenum += 1
print("Done")
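# --- Hedged usage sketch (not part of the original file; the script name is hypothetical) ---
# Assuming the html/<cuisine> directory already exists:
#   python scrape_recipes.py <main-url> italian 3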
|
186454
|
with open('youtube_chat.txt', 'r') as infile:
questions = infile.readlines()
with open('question_dataset.txt', 'w+') as file:
for s in set(questions):
print(s.rstrip()[1:-1], file=file)
|
186462
|
import pymbar
from fe import endpoint_correction
from collections import namedtuple
import pickle
import dataclasses
import time
import functools
import copy
import jax
import numpy as np
from md import minimizer
from typing import Tuple, List, Any
import os
from fe import standard_state
from fe.utils import sanitize_energies, extract_delta_Us_from_U_knk
from timemachine.lib import potentials, custom_ops
@dataclasses.dataclass
class SimulationResult:
xs: np.ndarray
boxes: np.ndarray
du_dps: np.ndarray
lambda_us: np.ndarray
def flatten(v):
return tuple(), (v.xs, v.boxes, v.du_dps, v.lambda_us)
def unflatten(aux_data, children):
xs, boxes, du_dps, lambda_us = aux_data
return SimulationResult(xs, boxes, du_dps, lambda_us)
jax.tree_util.register_pytree_node(SimulationResult, flatten, unflatten)
def run_model_simulations(model, sys_params):
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
all_args = []
for lamb_idx, lamb in enumerate(model.lambda_schedule):
subsample_interval = 1000
all_args.append(
(
lamb,
model.box,
model.x0,
model.v0,
bound_potentials,
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
model.lambda_schedule,
)
)
if model.endpoint_correct:
assert isinstance(bound_potentials[-1], potentials.HarmonicBond)
all_args.append(
(
1.0,
model.box,
model.x0,
model.v0,
bound_potentials[:-1], # strip out the restraints
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
[], # no need to evaluate Us for the endpoint correction
)
)
results = []
if model.client is None:
for args in all_args:
results.append(simulate(*args))
else:
futures = []
for args in all_args:
futures.append(model.client.submit(simulate, *args))
for future in futures:
results.append(future.result())
return results
def simulate(
lamb,
box,
x0,
v0,
final_potentials,
integrator,
barostat,
equil_steps,
prod_steps,
x_interval,
u_interval,
lambda_windows,
):
"""
Run a simulation and collect relevant statistics for this simulation.
Parameters
----------
lamb: float
lambda value used for the equilibrium simulation
box: np.array
3x3 numpy array of the box, dtype should be np.float64
x0: np.array
Nx3 numpy array of the coordinates
v0: np.array
Nx3 numpy array of the velocities
final_potentials: list
list of unbound potentials
integrator: timemachine.Integrator
integrator to be used for dynamics
barostat: timemachine.Barostat
barostat to be used for equilibration
equil_steps: int
number of equilibration steps
prod_steps: int
number of production steps
x_interval: int
how often we store coordinates. If x_interval == 0 then
no frames are returned.
u_interval: int
how often we store energies. If u_interval == 0 then
no energies are returned
lambda_windows: list of float
lambda windows we evaluate energies at.
Returns
-------
SimulationResult
Results of the simulation.
"""
all_impls = []
# set up observables for du_dps here as well.
du_dp_obs = []
for bp in final_potentials:
impl = bp.bound_impl(np.float32)
all_impls.append(impl)
du_dp_obs.append(custom_ops.AvgPartialUPartialParam(impl, 25))
# fire minimize once again, needed for parameter interpolation
x0 = minimizer.fire_minimize(x0, all_impls, box, np.ones(100, dtype=np.float64) * lamb)
# sanity check that forces are well behaved
for bp in all_impls:
du_dx, du_dl, u = bp.execute(x0, box, lamb)
norm_forces = np.linalg.norm(du_dx, axis=1)
assert np.all(norm_forces < 25000), "Forces much greater than expected after minimization"
if integrator.seed == 0:
# this deepcopy is needed if we're running with client == None
integrator = copy.deepcopy(integrator)
integrator.seed = np.random.randint(np.iinfo(np.int32).max)
if barostat.seed == 0:
barostat = copy.deepcopy(barostat)
barostat.seed = np.random.randint(np.iinfo(np.int32).max)
intg_impl = integrator.impl()
# technically we need to only pass in the nonbonded impl
barostat_impl = barostat.impl(all_impls)
# context components: positions, velocities, box, integrator, energy fxns
ctxt = custom_ops.Context(x0, v0, box, intg_impl, all_impls, barostat_impl)
# equilibration
equil_schedule = np.ones(equil_steps) * lamb
ctxt.multiple_steps(equil_schedule)
# (ytz): intentionally hard-coded, I'd rather the end-user *not*
# muck with this unless they have a good reason to.
barostat_impl.set_interval(25)
for obs in du_dp_obs:
ctxt.add_observable(obs)
full_us, xs, boxes = ctxt.multiple_steps_U(lamb, prod_steps, np.array(lambda_windows), u_interval, x_interval)
# keep the structure of grads the same as that of final_potentials so we can properly
# form their vjps.
grads = []
for obs in du_dp_obs:
grads.append(obs.avg_du_dp())
result = SimulationResult(
xs=xs.astype("float32"),
boxes=boxes.astype("float32"),
du_dps=grads,
lambda_us=full_us,
)
return result
FreeEnergyModel = namedtuple(
"FreeEnergyModel",
[
"unbound_potentials",
"endpoint_correct",
"client",
"box",
"x0",
"v0",
"integrator",
"barostat",
"lambda_schedule",
"equil_steps",
"prod_steps",
"beta",
"prefix",
],
)
gradient = List[Any] # TODO: make this more descriptive of dG_grad structure
def _deltaG_from_results(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
if model.endpoint_correct:
sim_results = results[:-1]
else:
sim_results = results
U_knk = []
N_k = []
for result in sim_results:
U_knk.append(result.lambda_us)
N_k.append(len(result.lambda_us)) # number of frames
U_knk = np.array(U_knk)
bar_dG = 0
bar_dG_err = 0
delta_Us = extract_delta_Us_from_U_knk(U_knk)
for lambda_idx in range(len(model.lambda_schedule) - 1):
fwd_delta_u = model.beta * delta_Us[lambda_idx][0]
rev_delta_u = model.beta * delta_Us[lambda_idx][1]
dG_exact, exact_bar_err = pymbar.BAR(fwd_delta_u, rev_delta_u)
bar_dG += dG_exact / model.beta
exact_bar_overlap = endpoint_correction.overlap_from_cdf(fwd_delta_u, rev_delta_u)
# probably off by a factor of two since we re-use samples.
bar_dG_err += (exact_bar_err / model.beta) ** 2
lamb_start = model.lambda_schedule[lambda_idx]
lamb_end = model.lambda_schedule[lambda_idx + 1]
print(
f"{model.prefix}_BAR: lambda {lamb_start:.3f} -> {lamb_end:.3f} dG: {dG_exact/model.beta:.3f} dG_err: {exact_bar_err/model.beta:.3f} overlap: {exact_bar_overlap:.3f}"
)
# for MBAR we need to sanitize the energies
clean_U_knks = [] # [K, F, K]
for lambda_idx, full_us in enumerate(U_knk):
clean_U_knks.append(sanitize_energies(full_us, lambda_idx))
print(
model.prefix,
" MBAR: amin",
np.amin(clean_U_knks),
"median",
np.median(clean_U_knks),
"max",
np.amax(clean_U_knks),
)
K = len(model.lambda_schedule)
clean_U_knks = np.array(clean_U_knks) # [K, F, K]
U_kn = np.reshape(clean_U_knks, (-1, K)).transpose() # [K, F*K]
u_kn = U_kn * model.beta
np.save(model.prefix + "_U_kn.npy", U_kn)
mbar = pymbar.MBAR(u_kn, N_k)
differences, error_estimates = mbar.getFreeEnergyDifferences()
f_k, error_k = differences[0], error_estimates[0]
mbar_dG = f_k[-1] / model.beta
mbar_dG_err = error_k[-1] / model.beta
bar_dG_err = np.sqrt(bar_dG_err)
dG = bar_dG # use the exact answer
dG_grad = []
# (ytz): results[-1].du_dps contain system parameter derivatives for the
# independent, gas phase simulation. They're usually ordered as:
# [Bonds, Angles, Torsions, Nonbonded]
#
# results[0].du_dps contain system parameter derivatives for the core
# restrained state. If we're doing the endpoint correction during
# decoupling stages, the derivatives are ordered as:
# [Bonds, Angles, Torsions, Nonbonded, RestraintBonds]
# Otherwise, in stages like conversion where the endpoint correction
# is turned off, the derivatives are ordered as :
# [Bonds, Angles, Torsions, Nonbonded]
# Note that this zip will always loop over only the
# [Bonds, Angles, Torsions, Nonbonded] terms, since it only
# enumerates over the smaller of the two lists.
for rhs, lhs in zip(results[-1].du_dps, results[0].du_dps):
dG_grad.append(rhs - lhs)
if model.endpoint_correct:
assert len(results[0].du_dps) - len(results[-1].du_dps) == 1
# (ytz): Fill in missing derivatives since zip() from above loops
# over the shorter array.
lhs = results[0].du_dps[-1]
rhs = 0 # zero, as the energies do not depend on the core restraints.
dG_grad.append(rhs - lhs)
core_restr = bound_potentials[-1]
# (ytz): tbd, automatically find optimal k_translation/k_rotation such that
# standard deviation and/or overlap is maximized
k_translation = 200.0
k_rotation = 100.0
start = time.time()
lhs_du, rhs_du, rotation_samples, translation_samples = endpoint_correction.estimate_delta_us(
k_translation=k_translation,
k_rotation=k_rotation,
core_idxs=core_restr.get_idxs(),
core_params=core_restr.params.reshape((-1, 2)),
beta=model.beta,
lhs_xs=results[-2].xs,
rhs_xs=results[-1].xs,
seed=2021,
)
dG_endpoint, endpoint_err = pymbar.BAR(model.beta * lhs_du, model.beta * np.array(rhs_du))
dG_endpoint = dG_endpoint / model.beta
endpoint_err = endpoint_err / model.beta
# compute standard state corrections for translation and rotation
dG_ssc_translation, dG_ssc_rotation = standard_state.release_orientational_restraints(
k_translation, k_rotation, model.beta
)
overlap = endpoint_correction.overlap_from_cdf(lhs_du, rhs_du)
lhs_mean = np.mean(lhs_du)
rhs_mean = np.mean(rhs_du)
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} dG_endpoint (E) {dG_endpoint:.3f} dG_endpoint_err {endpoint_err:.3f} dG_ssc_translation {dG_ssc_translation:.3f} dG_ssc_rotation {dG_ssc_rotation:.3f} overlap {overlap:.3f} lhs_mean {lhs_mean:.3f} rhs_mean {rhs_mean:.3f} lhs_n {len(lhs_du)} rhs_n {len(rhs_du)} | time: {time.time()-start:.3f}s"
)
dG += dG_endpoint + dG_ssc_translation + dG_ssc_rotation
bar_dG_err = np.sqrt(bar_dG_err ** 2 + endpoint_err ** 2)
else:
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} "
)
return (dG, bar_dG_err, results), dG_grad
@functools.partial(
jax.custom_vjp,
nondiff_argnums=(
0,
1,
),
)
def deltaG_from_results(model, results, sys_params) -> Tuple[float, List]:
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_from_results_fwd(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as deltaG_from_results, but returns the full tuple"""
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)
def deltaG_from_results_bwd(model, results, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG_err w.r.t. loss: which we don't use
# grad[2] is the adjoint of simulation results w.r.t. loss: which we don't use
return ([grad[0] * r for r in residual],)
@functools.partial(jax.custom_vjp, nondiff_argnums=(0,))
def deltaG(model, sys_params) -> Tuple[float, List]:
results = run_model_simulations(model, sys_params)
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_fwd(model, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as deltaG, but returns the full tuple"""
results = run_model_simulations(model, sys_params)
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)
def deltaG_bwd(model, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG_err w.r.t. loss: which we don't use
# grad[2] is the adjoint of simulation results w.r.t. loss: which we don't use
return ([grad[0] * r for r in residual],)
deltaG_from_results.defvjp(deltaG_from_results_fwd, deltaG_from_results_bwd)
deltaG.defvjp(deltaG_fwd, deltaG_bwd)
|
186492
|
from flask import Flask, render_template, request, redirect, url_for
import uuid
class Task:
def __init__(self, task):
self.id = uuid.uuid1().hex
self.task = task
self.status = 'active'
self.completed = False
def toggle(self):
if self.status == 'active':
self.status = 'completed'
self.completed = True
else:
self.status = 'active'
self.completed = False
# Global state, yay!
tasks = {}
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return render_template('index.html', tasks=tasks)
@app.route('/todos', methods = ['POST'])
def todos():
task = Task(task=request.form['item-text'])
tasks[task.id] = task
return redirect(url_for('index'))
@app.route('/delete', methods = ['POST'])
def delete_todos():
del tasks[request.form['id']]
return redirect(url_for('index'))
@app.route('/toggle', methods = ['POST'])
def toggle_todo():
tasks[request.form['id']].toggle()
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
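# --- Hedged usage sketch (not part of the original file) ---
# With the app running on Flask's default port:
#   curl -X POST -d 'item-text=buy milk' http://127.0.0.1:5000/todos
#   curl -X POST -d 'id=<task id>' http://127.0.0.1:5000/toggle
#   curl -X POST -d 'id=<task id>' http://127.0.0.1:5000/delete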
|
186501
|
import unittest
from osm_export_tool.sources import Overpass
from osm_export_tool.mapping import Mapping
class TestMappingToOverpass(unittest.TestCase):
def test_mapping(self):
y = '''
buildings:
types:
- points
select:
- column1
where: column2 IS NOT NULL
other1:
types:
- points
- polygons
select:
- column1
- irrelevant
where: column2 IS NOT NULL AND column3 IN ('foo','bar')
other2:
types:
- lines
select:
- column5:key
'''
mapping = Mapping(y)
nodes, ways, relations = Overpass.filters(mapping)
self.assertCountEqual(nodes,["['column3'~'foo|bar']","['column2']"])
# force quoting of strings to handle keys with colons
self.assertCountEqual(ways,["['column5:key']","['column3'~'foo|bar']","['column2']"])
self.assertCountEqual(relations,["['column3'~'foo|bar']","['column2']"])
class TestSQLToOverpass(unittest.TestCase):
def test_basic(self):
s = Overpass.sql("name = 'somename'")
self.assertEqual(s,["['name'='somename']"])
s = Overpass.sql("level > 4")
self.assertEqual(s,["['level']"])
def test_basic_list(self):
s = Overpass.sql("name IN ('val1','val2')")
self.assertEqual(s,["['name'~'val1|val2']"])
def test_whitespace(self):
s = Overpass.sql("name = 'some value'")
self.assertEqual(s,["['name'='some value']"])
def test_notnull(self):
s = Overpass.sql("name is not null")
self.assertEqual(s,["['name']"])
def test_and_or(self):
s = Overpass.sql("name1 = 'foo' or name2 = 'bar'")
self.assertEqual(s,["['name1'='foo']","['name2'='bar']"])
s = Overpass.sql("(name1 = 'foo' and name2 = 'bar') or name3 = 'baz'")
self.assertEqual(s,["['name1'='foo']","['name2'='bar']","['name3'='baz']"])
|
186539
|
db_config = {
'user': '##username##',
'passwd': '##password##',
'host': '##host##',
'db': 'employees',
}
|
186565
|
import logging
import numpy as np
from tramp.models import glm_generative
from tramp.experiments import save_experiments, BayesOptimalScenario
from tramp.algos import EarlyStopping
def run_perceptron(N, alpha, p_pos):
model = glm_generative(
N=N, alpha=alpha,
ensemble_type="gaussian", prior_type="binary", output_type="sgn",
prior_p_pos=p_pos
)
scenario = BayesOptimalScenario(model, x_ids=["x"])
early = EarlyStopping()
records = scenario.run_all(max_iter=200, callback=early)
return records
if __name__ == "__main__":
csv_file = __file__.replace(".py", ".csv")
logging.basicConfig(level=logging.INFO)
save_experiments(
run_perceptron, csv_file,
N=1000, p_pos=[0.25, 0.50, 0.75], alpha=np.linspace(0, 2, 101)[1:]
)
|
186594
|
import random, copy
def read_file(name):
    """
    Read the given file and return a dictionary representing the graph
    (vertex -> list of adjacent vertices).
    """
    with open(name, 'r') as data:
        lines = data.read().strip().split("\n")
    graph_dict = {}
    for element in lines:
        line_list = list(map(int, element.strip().split("\t")))
        graph_dict[line_list[0]] = line_list[1:]
    return graph_dict
def random_pick(new_dict):
"""
    Given a graph dictionary, return a randomly selected edge (a, b).
"""
a = random.choice(list(new_dict.keys()))
b = random.choice(new_dict[a])
selected_pair = (a,b)
return selected_pair
def karger(new_dict):
"""
    Return the cut found in a single randomized contraction trial.
"""
num = []
while len(new_dict)>2:
        a,b = random_pick(new_dict)
        # merge two vertices
        new_dict[a].extend(new_dict[b])
        # in every vertex adjacent to b, replace b with a
        for c in new_dict[b]:
            new_dict[c].remove(b)
            new_dict[c].append(a)
        # delete self-loops of vertex a
        while a in new_dict[a]:
            new_dict[a].remove(a)
        # delete vertex b
        del new_dict[b]
for key in new_dict:
num.append(len(new_dict[key]))
return num[0]
def combine(n, name):
"""
Arguments
n: the number of iterations/trials.
name: input file name
Output
min_cut: the minimum cut
"""
graph = read_file(name)
    min_cut = float('inf')  # no finite cut found yet
for i in range(n):
G = copy.deepcopy(graph)
cut = karger(G)
if cut < min_cut:
min_cut = cut
return min_cut
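# Minimal usage sketch (not in the original file). Assumes "graph.txt" is a
# tab-separated adjacency-list file of the form read_file expects; the trial
# count of 100 is illustrative, and more trials raise the probability of
# finding the true minimum cut.
if __name__ == '__main__':
    print(combine(100, 'graph.txt'))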
|
186599
|
from .queries import TerminalQuery, QueryParams
from .search import Searcher
from .services import SeqmotifService, SequenceService, StructureService, StructMotifService, TextService
class Command:
def __init__(self, url="https://search.rcsb.org/rcsbsearch/v1/query?", resp_type="entry",
start=0, rows=100):
self._set_source(url, resp_type)
self._set_resp_limits(start, rows)
def _set_source(self, url, resp_type):
self.url = url
        if resp_type not in QueryParams.RETURN_TYPES.value:
            raise ValueError('Unknown return type, available {}'.format(QueryParams.RETURN_TYPES.value))
self.resp_type = resp_type
def _set_resp_limits(self, start, rows):
self.start = start
self.rows = rows
@classmethod
def set_source(cls, url, resp_type):
cls.url = url
        if resp_type not in QueryParams.RETURN_TYPES.value:
            raise ValueError('Unknown return type, available {}'.format(QueryParams.RETURN_TYPES.value))
cls.resp_type = resp_type
@classmethod
def set_parser(cls, parser):
cls.parser = parser
def execute(self):
pass
class SearchMotifCommand(Command):
def __init__(self, query, type_='prosite', *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.query = query
self.type_ = type_
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
SeqmotifService(self.query, self.type_, "pdb_protein_sequence"), self.resp_type,
start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class SequenceSimilarityCommand(Command):
def __init__(self, sequence, evalue=1, identity=0.9, *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.sequence = sequence
self.evalue = evalue
self.identity = identity
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
SequenceService(self.sequence, self.evalue, self.identity, "pdb_protein_sequence"),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class StructureSimilarityCommand(Command):
def __init__(self, entry_id, assembly_id=1, operator="strict_shape_match", *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.entry_id = entry_id
self.assembly_id = assembly_id
self.operator = operator
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
StructureService(self.entry_id, self.assembly_id, self.operator), self.resp_type,
start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class StructureMotifCommand(Command):
def __init__(self, entry_id, residue_ids, score_cutoff=0, exchanges=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.entry_id = entry_id
self.residue_ids = residue_ids
self.score_cutoff = score_cutoff
self.exchanges = exchanges or {}
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
StructMotifService(self.entry_id, self.residue_ids, self.score_cutoff, self.exchanges),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class TextCommand(Command):
def __init__(self, attribute, operator, value, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attribute = attribute
self.operator = operator
self.value = value
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
TextService(self.attribute, self.operator, self.value),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
@staticmethod
def get_doc():
TextService.set_input_params()
return TextService.input_params
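# Illustrative usage sketch (not part of the module): wiring a parser and
# running a sequence-similarity search. The parser object and the sequence
# below are hypothetical; TerminalQuery expects a compatible response_parser.
#
#   Command.set_parser(my_response_parser)
#   cmd = SequenceSimilarityCommand("MTEYKLVVVGAGGVGKS", evalue=0.1, identity=0.95)
#   result = cmd.execute()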
|
186603
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from logging import getLogger
from six import add_metaclass, iteritems
logger = getLogger(__name__)
@add_metaclass(ABCMeta)
class Plugin(object):
"""
Base class for plugins. A plugin is used to add functionality related to frameworks.
"""
def __init__(self, name):
"""
Initialize plugin.
:param name: the name of the plugin.
"""
self.name = name
    def supports(self, config, context):
        """
        Whether this plugin supports generating a payload for the current configuration and context.
        :param config: honeybadger configuration.
        :param context: current honeybadger context.
        :return: True if the plugin can generate a payload for the current context, False otherwise.
        """
        return False
    @abstractmethod
    def generate_payload(self, default_payload, config, context):
        """
        Return additional payload for the given context. May be used by actual plugin implementations to gather
        additional information. PluginManager passes the payload gathered so far as the first argument.
        :param default_payload: the payload gathered so far.
        :param config: honeybadger configuration.
        :param context: context gathered so far to send to honeybadger.
        :return: a dictionary with the generated payload.
        """
        pass
class PluginManager(object):
"""
Manages lifecycle of plugins.
"""
def __init__(self):
self._registered = OrderedDict()
def register(self, plugin):
"""
Register the given plugin. Registration order is kept.
:param plugin: the plugin to register.
"""
if plugin.name not in self._registered:
logger.info('Registering plugin %s' % plugin.name)
self._registered[plugin.name] = plugin
else:
logger.warning('Plugin %s already registered' % plugin.name)
    def generate_payload(self, default_payload, config=None, context=None):
        """
        Generate a payload by iterating over registered plugins, merging each
        supporting plugin's output into the default payload.
        :param default_payload: the payload to start from.
        :param config: honeybadger configuration.
        :param context: current context.
        :return: a dict with the generated payload.
        """
for name, plugin in iteritems(self._registered):
if plugin.supports(config, context):
logger.debug('Returning payload from plugin %s' % name)
default_payload = plugin.generate_payload(default_payload, config, context)
            else:
                logger.debug('Plugin %s does not support current context', name)
return default_payload
# Global plugin manager
default_plugin_manager = PluginManager()
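# Illustrative sketch (not part of the library): a minimal plugin that tags
# the payload and is registered with the global manager. The plugin name and
# payload key below are hypothetical.
#
# class ExamplePlugin(Plugin):
#     def __init__(self):
#         super(ExamplePlugin, self).__init__('example')
#
#     def supports(self, config, context):
#         return context is not None
#
#     def generate_payload(self, default_payload, config, context):
#         default_payload['example'] = True
#         return default_payload
#
# default_plugin_manager.register(ExamplePlugin())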
|
186605
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'TRX',
'author': '<NAME>, based on Paterva\'s library',
'url': 'https://github.com/krmaxwell/TRX',
'download_url': 'https://github.com/krmaxwell/TRX',
'author_email': '<EMAIL>',
'version': '0.2',
'install_requires': ['nose', 'coverage', 'xmltodict'],
'packages': ['TRX'],
'scripts': [],
'name': 'TRX'
}
setup(**config)
|
186641
|
from ..crypto import blake160
from ..crypto import get_account_id_from_public
from .keytype import KeyType
def key_from_public_key(key_type: KeyType, public_key: str) -> bytes:
if key_type == KeyType.PLATFORM:
return get_account_id_from_public(public_key)
elif key_type == KeyType.ASSET:
return blake160(public_key)
else:
raise ValueError("Invalid key type")
|
186652
|
from .tensor import Tensor
from .context import Context
from .layer import Layer, Model
from .parameter import Parameter
from .allocator import Allocator
from .memory import Memory
from .device import Device
from . import config
|
186690
|
from datetime import datetime
from django.test import TestCase
from calaccess_campaign_browser import models
class ModelTest(TestCase):
"""
Create model objects and try out their attributes.
"""
def test_models(self):
obj = models.Filer.objects.create(
name="FooPAC",
filer_id_raw=1,
xref_filer_id=1,
filer_type="PAC",
party='0',
status='A',
effective_date=datetime.now()
)
obj.__unicode__()
obj.slug
obj.real_filings
obj.total_contributions
obj.meta()
obj.klass()
obj.doc()
obj.to_dict()
obj.to_json()
obj.short_name
obj.clean_name
def test_committee(self):
filer = models.Filer.objects.create(
name="<NAME>",
filer_id_raw=1,
xref_filer_id=1,
filer_type="cand",
party='16002',
status='A',
effective_date=datetime.now()
)
committee = models.Committee.objects.create(
name='<NAME>',
filer=filer,
filer_id_raw=filer.filer_id_raw,
xref_filer_id=filer.xref_filer_id,
committee_type=filer.filer_type,
party=filer.party,
status='Y',
level_of_government='40502',
effective_date=filer.effective_date,
)
committee.__unicode__()
def test_cycle(self):
pass
def test_filingperiod(self):
pass
def test_filing(self):
pass
def test_summary(self):
pass
def test_contribution(self):
pass
def test_office(self):
pass
def test_candidate(self):
pass
def test_proposition(self):
pass
def test_propositionfiler(self):
pass
|
186716
|
from rest_framework.mixins import RetrieveModelMixin
from rest_framework.viewsets import GenericViewSet
from .models import Block
from .serializers import BlockSerializer
class BlockViewSet(RetrieveModelMixin, GenericViewSet):
queryset = Block.objects
serializer_class = BlockSerializer
lookup_value_regex = '[^/]+' # Allow '.' for email-like guids.
def get_object(self):
identifier = self.kwargs.pop('pk')
self.lookup_field = 'pk' if identifier.isdigit() else 'guid'
self.kwargs[self.lookup_field] = identifier
return super().get_object()
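# Illustrative requests (not part of the module; the "blocks" router prefix
# is hypothetical):
#   GET /blocks/42/            -> resolved by numeric pk
#   GET /blocks/user@host.io/  -> resolved by guid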
|
186730
|
import sublime_plugin
class FileNameOnStatusBar(sublime_plugin.EventListener):
def on_activated(self, view):
path = view.file_name()
if path:
for folder in view.window().folders():
path = path.replace(folder + '/', '', 1)
view.set_status('file_name', path)
else:
view.set_status('file_name', 'untitled')
|
186764
|
import pylayers.simul.simultraj as st
from pylayers.measures.cormoran import *
import numpy as np
# load CorSer 6 (default)
C = CorSer(serie=6,day=11)
# create a Simulation from CorSer motion capture file
S = st.Simul(C,verbose=True)
# create a dictionary from links
llinks = S.N.links['ieee802154']
link={'ieee802154':[]}
# select link of interest
# TorsoTopRight(6) - AP1(1,2,3,4)
#link['ieee802154'].append(S.N.links['ieee802154'][100])
#link['ieee802154'].append(S.N.links['ieee802154'][101])
#link['ieee802154'].append(S.N.links['ieee802154'][102])
#link['ieee802154'].append(S.N.links['ieee802154'][85])
# BackCenter(8) - AP1(1,2,3,4)
link['ieee802154'].append(S.N.links['ieee802154'][112])
#link['ieee802154'].append(S.N.links['ieee802154'][115])
#link['ieee802154'].append(S.N.links['ieee802154'][117])
#link['ieee802154'].append(S.N.links['ieee802154'][90])
# Max entropy link (104)
#link['ieee802154'].append(S.N.links['ieee802154'][104])#
#link['ieee802154'].append(S.N.links['ieee802154'][48])#
#link['ieee802154'].append(S.N.links['ieee802154'][36])# 15-7
#link['ieee802154'].append(S.N.links['ieee802154'][17])#
#link['ieee802154'].append(S.N.links['ieee802154'][8])#
#lt = C.tmocap[::10]
lt = C.tmocap[::500]
#S.run(links=link,t=lt)
dval = S.get_value(links=link,typ=['H'],t=0)
|
186768
|
from __future__ import print_function
import time
import numpy as np
from scipy.special import gammaln, psi
from six.moves import xrange
from .utils import write_top_words
from .formatted_logger import formatted_logger
eps = 1e-20
logger = formatted_logger('RelationalTopicModel', 'info')
class RelationalTopicModel:
""" implementation of relational topic model by Chang and Blei (2009)
I implemented the exponential link probability function in here
Attributes
----------
eta: ndarray, shape (n_topic)
coefficient of exponential function
rho: int
pseudo number of negative example
"""
def __init__(self, n_topic, n_doc, n_voca, alpha=0.1, rho=1000, **kwargs):
self.n_doc = n_doc
self.n_topic = n_topic
self.n_voca = n_voca
self.alpha = alpha
self.gamma = np.random.gamma(100., 1. / 100, [self.n_doc, self.n_topic])
self.beta = np.random.dirichlet([5] * self.n_voca, self.n_topic)
self.nu = 0
self.eta = np.random.normal(0., 1, self.n_topic)
self.phi = list()
self.pi = np.zeros([self.n_doc, self.n_topic])
self.rho = rho
self.verbose = kwargs.pop('verbose', True)
logger.info('Initialize RTM: num_voca:%d, num_topic:%d, num_doc:%d' % (self.n_voca, self.n_topic, self.n_doc))
def fit(self, doc_ids, doc_cnt, doc_links, max_iter=100):
for di in xrange(self.n_doc):
unique_word = len(doc_ids[di])
cnt = doc_cnt[di]
self.phi.append(np.random.dirichlet([10] * self.n_topic, unique_word).T) # list of KxW
self.pi[di, :] = np.sum(cnt * self.phi[di], 1) / np.sum(cnt * self.phi[di])
for iter in xrange(max_iter):
tic = time.time()
self.variation_update(doc_ids, doc_cnt, doc_links)
self.parameter_estimation(doc_links)
if self.verbose:
elbo = self.compute_elbo(doc_ids, doc_cnt, doc_links)
logger.info('[ITER] %3d,\tElapsed time: %.3f\tELBO: %.3f', iter, time.time()-tic, elbo)
def compute_elbo(self, doc_ids, doc_cnt, doc_links):
""" compute evidence lower bound for trained model
"""
elbo = 0
e_log_theta = psi(self.gamma) - psi(np.sum(self.gamma, 1))[:, np.newaxis] # D x K
log_beta = np.log(self.beta + eps)
for di in xrange(self.n_doc):
words = doc_ids[di]
cnt = doc_cnt[di]
elbo += np.sum(cnt * (self.phi[di] * log_beta[:, words])) # E_q[log p(w_{d,n}|\beta,z_{d,n})]
elbo += np.sum((self.alpha - 1.) * e_log_theta[di, :]) # E_q[log p(\theta_d | alpha)]
elbo += np.sum(self.phi[di].T * e_log_theta[di, :]) # E_q[log p(z_{d,n}|\theta_d)]
elbo += -gammaln(np.sum(self.gamma[di, :])) + np.sum(gammaln(self.gamma[di, :])) \
- np.sum((self.gamma[di, :] - 1.) * (e_log_theta[di, :])) # - E_q[log q(theta|gamma)]
elbo += - np.sum(cnt * self.phi[di] * np.log(self.phi[di])) # - E_q[log q(z|phi)]
for adi in doc_links[di]:
elbo += np.dot(self.eta,
self.pi[di] * self.pi[adi]) + self.nu # E_q[log p(y_{d1,d2}|z_{d1},z_{d2},\eta,\nu)]
return elbo
def variation_update(self, doc_ids, doc_cnt, doc_links):
# update phi, gamma
e_log_theta = psi(self.gamma) - psi(np.sum(self.gamma, 1))[:, np.newaxis]
new_beta = np.zeros([self.n_topic, self.n_voca])
for di in xrange(self.n_doc):
words = doc_ids[di]
cnt = doc_cnt[di]
doc_len = np.sum(cnt)
new_phi = np.log(self.beta[:, words] + eps) + e_log_theta[di, :][:, np.newaxis]
gradient = np.zeros(self.n_topic)
for ai in doc_links[di]:
gradient += self.eta * self.pi[ai, :] / doc_len
new_phi += gradient[:, np.newaxis]
new_phi = np.exp(new_phi)
new_phi = new_phi / np.sum(new_phi, 0)
self.phi[di] = new_phi
self.pi[di, :] = np.sum(cnt * self.phi[di], 1) / np.sum(cnt * self.phi[di])
self.gamma[di, :] = np.sum(cnt * self.phi[di], 1) + self.alpha
new_beta[:, words] += (cnt * self.phi[di])
self.beta = new_beta / np.sum(new_beta, 1)[:, np.newaxis]
def parameter_estimation(self, doc_links):
# update eta, nu
pi_sum = np.zeros(self.n_topic)
num_links = 0.
for di in xrange(self.n_doc):
for adi in doc_links[di]:
pi_sum += self.pi[di, :] * self.pi[adi, :]
num_links += 1
num_links /= 2. # divide by 2 for bidirectional edge
pi_sum /= 2.
pi_alpha = np.zeros(self.n_topic) + self.alpha / (self.alpha * self.n_topic) * self.alpha / (self.alpha * self.n_topic)
self.nu = np.log(num_links - np.sum(pi_sum)) - np.log(
self.rho * (self.n_topic - 1) / self.n_topic + num_links - np.sum(pi_sum))
self.eta = np.log(pi_sum) - np.log(pi_sum + self.rho * pi_alpha) - self.nu
def save_model(self, output_directory, vocab=None):
import os
if not os.path.exists(output_directory):
os.mkdir(output_directory)
np.savetxt(output_directory + '/eta.txt', self.eta, delimiter='\t')
np.savetxt(output_directory + '/beta.txt', self.beta, delimiter='\t')
np.savetxt(output_directory + '/gamma.txt', self.gamma, delimiter='\t')
with open(output_directory + '/nu.txt', 'w') as f:
f.write('%f\n' % self.nu)
if vocab is not None:
write_top_words(self.beta, vocab, output_directory + '/top_words.csv')
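# Illustrative usage sketch (not part of the module): a toy corpus of three
# documents over a five-word vocabulary with symmetric links. All sizes and
# values below are hypothetical.
#
# model = RelationalTopicModel(n_topic=2, n_doc=3, n_voca=5)
# doc_ids = [[0, 1], [1, 2, 3], [3, 4]]    # word indices per document
# doc_cnt = [[2, 1], [1, 1, 1], [1, 2]]    # matching word counts
# doc_links = [[1], [0, 2], [1]]           # symmetric adjacency lists
# model.fit(doc_ids, doc_cnt, doc_links, max_iter=10)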
|
186779
|
from PyObjCTools.TestSupport import *
import socket, time, struct
from CoreFoundation import *
import CoreFoundation
import sys
try:
unicode
except NameError:
unicode = str
try:
long
except NameError:
long = int
try:
buffer
except NameError:
buffer = memoryview
def onTheNetwork():
try:
socket.gethostbyname('www.apple.com')
except socket.gaierror:
return False
return True
class TestSocket (TestCase):
def testTypes(self):
self.assertIsCFType(CFSocketRef)
def testTypeID(self):
self.assertIsInstance(CFSocketGetTypeID(), (int, long))
def testConstants(self):
self.assertEqual(kCFSocketSuccess , 0)
self.assertEqual(kCFSocketError , -1)
self.assertEqual(kCFSocketTimeout , -2)
self.assertEqual(kCFSocketNoCallBack , 0)
self.assertEqual(kCFSocketReadCallBack , 1)
self.assertEqual(kCFSocketAcceptCallBack , 2)
self.assertEqual(kCFSocketDataCallBack , 3)
self.assertEqual(kCFSocketConnectCallBack , 4)
self.assertEqual(kCFSocketWriteCallBack , 8)
self.assertEqual(kCFSocketAutomaticallyReenableReadCallBack , 1)
self.assertEqual(kCFSocketAutomaticallyReenableAcceptCallBack , 2)
self.assertEqual(kCFSocketAutomaticallyReenableDataCallBack , 3)
self.assertEqual(kCFSocketAutomaticallyReenableWriteCallBack , 8)
self.assertEqual(kCFSocketCloseOnInvalidate , 128)
self.assertIsInstance(kCFSocketCommandKey, unicode)
self.assertIsInstance(kCFSocketNameKey, unicode)
self.assertIsInstance(kCFSocketValueKey, unicode)
self.assertIsInstance(kCFSocketResultKey, unicode)
self.assertIsInstance(kCFSocketErrorKey, unicode)
self.assertIsInstance(kCFSocketRegisterCommand, unicode)
self.assertIsInstance(kCFSocketRetrieveCommand, unicode)
self.assertEqual(kCFSocketLeaveErrors, 64)
def testStructs(self):
o = CFSocketSignature()
self.assertHasAttr(o, 'protocolFamily')
self.assertHasAttr(o, 'socketType')
self.assertHasAttr(o, 'protocol')
self.assertHasAttr(o, 'address')
def testNameRegistry(self):
p1 = CFSocketGetDefaultNameRegistryPortNumber()
self.assertIsInstance(p1, (int, long))
CFSocketSetDefaultNameRegistryPortNumber(p1+1)
p2 = CFSocketGetDefaultNameRegistryPortNumber()
self.assertIsInstance(p2, (int, long))
self.assertEqual(p2, p1+1)
CFSocketSetDefaultNameRegistryPortNumber(p1)
@onlyIf(onTheNetwork(), "cannot test without internet connection")
def testSocketFunctions(self):
data = {}
state = []
def callback(sock, kind, address, data, info):
state.append((sock, kind, address, data, info))
sock = CFSocketCreate(None, socket.AF_INET, socket.SOCK_STREAM, 0,
kCFSocketReadCallBack|kCFSocketWriteCallBack,
callback, data)
self.assertIsInstance(sock, CFSocketRef)
localaddr = struct.pack('>BBHBBBB', 16, socket.AF_INET, 9425, 127, 0, 0, 1)
localaddr += b'\0' * 8
if sys.version_info[0] == 2:
localaddr = buffer(localaddr)
err = CFSocketSetAddress(sock, localaddr)
self.assertEqual(err, kCFSocketSuccess)
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
try:
sock = CFSocketCreateWithNative(None, sd.fileno(),
kCFSocketReadCallBack|kCFSocketWriteCallBack,
callback, data)
self.assertIsInstance(sock, CFSocketRef)
n = CFSocketGetNative(sock)
self.assertIsInstance(n, (int, long))
self.assertEqual(n, sd.fileno())
ctx = CFSocketGetContext(sock, None)
self.assertIs(ctx, data)
flags = CFSocketGetSocketFlags(sock)
self.assertIsInstance(flags, (int, long))
CFSocketSetSocketFlags(sock, kCFSocketAutomaticallyReenableReadCallBack|kCFSocketAutomaticallyReenableAcceptCallBack)
flags2 = CFSocketGetSocketFlags(sock)
self.assertIsInstance(flags2, (int, long))
self.assertEqual(flags2, kCFSocketAutomaticallyReenableReadCallBack|kCFSocketAutomaticallyReenableAcceptCallBack)
# Note: I don't expect anyone to actually use this api, building
# struct sockaddr buffers by hand is madness in python.
ip = socket.gethostbyname('www.apple.com')
ip = map(int, ip.split('.'))
sockaddr = struct.pack('>BBHBBBB', 16, socket.AF_INET, 80, *ip)
sockaddr += b'\0' * 8
if sys.version_info[0] == 2:
sockaddr = buffer(sockaddr)
e = CFSocketConnectToAddress(sock, sockaddr, 1.0)
self.assertIsInstance(e, (int, long))
self.assertEqual(e, kCFSocketSuccess)
self.assertResultIsCFRetained(CFSocketCopyPeerAddress)
addr = CFSocketCopyPeerAddress(sock)
self.assertIsInstance(addr, CFDataRef)
self.assertResultIsCFRetained(CFSocketCopyAddress)
addr = CFSocketCopyAddress(sock)
self.assertIsInstance(addr, CFDataRef)
CFSocketDisableCallBacks(sock, kCFSocketReadCallBack|kCFSocketAcceptCallBack)
CFSocketEnableCallBacks(sock, kCFSocketReadCallBack|kCFSocketAcceptCallBack)
if sys.version_info[0] == 2:
err = CFSocketSendData(sock, None, buffer("GET / HTTP/1.0"), 1.0)
else:
err = CFSocketSendData(sock, None, b"GET / HTTP/1.0", 1.0)
self.assertEqual(err, kCFSocketSuccess)
ok = CFSocketIsValid(sock)
self.assertIs(ok, True)
CFSocketInvalidate(sock)
self.assertResultIsBOOL(CFSocketIsValid)
ok = CFSocketIsValid(sock)
self.assertIs(ok, False)
localaddr = struct.pack('>BBHBBBB', 16, socket.AF_INET, 9424, 127, 0, 0, 1)
localaddr += b'\0' * 8
signature = CFSocketSignature(
socket.AF_INET,
socket.SOCK_STREAM,
0,
buffer(localaddr))
sock = CFSocketCreateWithSocketSignature(None, signature,
kCFSocketReadCallBack|kCFSocketWriteCallBack,
callback, data)
self.assertIsInstance(sock, CFSocketRef)
signature = CFSocketSignature(
socket.AF_INET,
socket.SOCK_STREAM,
0,
buffer(sockaddr))
sock = CFSocketCreateConnectedToSocketSignature(None, signature,
kCFSocketReadCallBack|kCFSocketWriteCallBack,
callback, data, 1.0)
self.assertIsInstance(sock, CFSocketRef)
self.assertResultIsCFRetained(CFSocketCreateRunLoopSource)
src = CFSocketCreateRunLoopSource(None, sock, 0)
self.assertIsInstance(src, CFRunLoopSourceRef)
finally:
sd.close()
def testSocketNameServer(self):
# The documentation says:
# Name server functionality is currently inoperable in Mac OS X.
#
# Therefore these functions are not available from Python
self.assertNotHasAttr(CoreFoundation, 'CFSocketCopyRegisteredSocketSignature')
self.assertNotHasAttr(CoreFoundation, 'CFSocketCopyRegisteredValue')
self.assertNotHasAttr(CoreFoundation, 'CFSocketRegisterSocketSignature')
self.assertNotHasAttr(CoreFoundation, 'CFSocketRegisterValue')
self.assertNotHasAttr(CoreFoundation, 'CFSocketUnregister')
if __name__ == "__main__":
main()
|
186780
|
from . import PynbodyPropertyCalculation
class Masses(PynbodyPropertyCalculation):
names = "finder_mass"
def calculate(self, halo, existing_properties):
return halo['mass'].sum()
class MassBreakdown(PynbodyPropertyCalculation):
names = "finder_dm_mass", "finder_star_mass", "finder_gas_mass"
def calculate(self, halo, existing_properties):
return halo.dm['mass'].sum(), halo.star['mass'].sum(), halo.gas['mass'].sum()
|
186808
|
from django.apps import AppConfig
class StorageConfig(AppConfig):
default_auto_field = "django.db.models.AutoField"
name = "storage"
|
186814
|
from .attr_snippets import AttributeSnippets
from .counterfact import CounterFactDataset
from .knowns import KnownsDataset
from .zsre import MENDQADataset
from .tfidf_stats import get_tfidf_vectorizer
|
186864
|
from btypes.big_endian import *
from j3d.animation import Animation,IncompatibleAnimationError
class Header(Struct):
magic = ByteString(4)
section_size = uint32
loop_mode = uint8
__padding__ = Padding(1)
duration = uint16
shape_animation_count = uint16
show_count = uint16
show_selection_offset = uint32
show_offset = uint32
class ShowSelection(Struct):
count = uint16
first = uint16
class ShapeAnimation: pass
class ShapeVisibilityAnimation(Animation):
def __init__(self,duration,loop_mode,shape_animations):
super().__init__(duration,loop_mode)
self.shape_animations = shape_animations
def attach(self,model):
if len(self.shape_animations) != len(model.shapes):
raise IncompatibleAnimationError()
self.time = -1
self.model = model
def update_model(self):
for shape,shape_animation in zip(self.model.shapes,self.shape_animations):
if self.time >= len(shape_animation.shows):
show = shape_animation.shows[-1]
else:
show = shape_animation.shows[self.time]
shape.hide = not show
def unpack(stream):
base = stream.tell()
header = Header.unpack(stream)
if header.magic != b'VAF1':
raise FormatError('invalid magic')
stream.seek(base + header.show_selection_offset)
show_selections = [ShowSelection.unpack(stream) for _ in range(header.shape_animation_count)]
stream.seek(base + header.show_offset)
shows = [bool8.unpack(stream) for _ in range(header.show_count)]
shape_animations = [ShapeAnimation() for _ in range(header.shape_animation_count)]
for shape_animation,show_selection in zip(shape_animations,show_selections):
shape_animation.shows = shows[show_selection.first:show_selection.first + show_selection.count]
stream.seek(base + header.section_size)
return ShapeVisibilityAnimation(header.duration,header.loop_mode,shape_animations)
|
186902
|
import os
# linesep = os.linesep.encode('ascii')
linesep = b'\n'
def dir_parts(dir: str) -> tuple[str, ...]:
    return tuple(os.path.normpath(dir).split(os.sep))
def is_in_dir(dir: str, in_: tuple[str, ...]) -> bool:
    return tuple(dir.split(os.sep, len(in_)))[:len(in_)] == in_
def is_in_dirs(dir: str, in_: tuple[tuple[str, ...], ...]) -> bool:
    return any(is_in_dir(dir, i) for i in in_)
def add_trailing_newlines(path: str) -> bool:
    with open(path, 'r+b') as fp:
        size = fp.seek(0, os.SEEK_END)
        if size < len(linesep):
            # empty file: nothing to fix, and seeking backwards would fail
            return False
        fp.seek(-len(linesep), os.SEEK_END)
        if fp.read(len(linesep)) != linesep:
            fp.write(linesep)
            return True
        return False
skip_parts = (
'.git',
'bin',
'dist',
'cache',
'__pycache__',
'lib',
'sorting_networks',
'src/main/resources',
'wrapper'
)
skip_parts = tuple(dir_parts(d) for d in skip_parts)
for (dir, dirs, files) in os.walk('.'):
if dir == '.':
dir = ''
else:
dir = os.path.normpath(dir)
if is_in_dirs(dir, skip_parts):
continue
for file in files:
file = os.path.join(dir, file)
if add_trailing_newlines(file):
print('Wrote ', file)
else:
print('Skipped', file)
|
186923
|
import unittest
from metalearn.metafeatures.base import collectordict
class CollectorDictTestCase(unittest.TestCase):
def test_no_init_args(self):
try:
cd = collectordict({'a': 1})
self.fail('collectordict should have failed when passed init args')
        except TypeError:
            pass
def test_no_duplicate_setter(self):
cd = collectordict()
cd[1] = 1
try:
cd[1] = 2
self.fail('collectordict should have raised an error when setting an existing key')
        except LookupError:
            pass
def test_no_duplicates_in_update(self):
cd = collectordict()
cd[1] = 1
try:
cd.update({1:2})
self.fail('collectordict should have raised an error when updating with an existing key')
        except LookupError:
            pass
|
186984
|
import altair as alt
from typing import Union, List
import pandas as pd
tooltipList = List[alt.Tooltip]
def _preprocess_data(data):
for indx in ("index", "columns"):
if isinstance(getattr(data, indx), pd.MultiIndex):
setattr(
data,
indx,
pd.Index(
[str(i) for i in getattr(data, indx)], name=getattr(data, indx).name
),
)
# Column names must all be strings.
return data.rename(columns=str).copy()
def _process_tooltip(tooltip):
"""converts tooltip els to string if needed"""
if isinstance(tooltip, list) and not isinstance(tooltip[0], alt.Tooltip):
tooltip = [str(el) for el in tooltip]
return tooltip
def scatter_matrix(
df,
color: Union[str, None] = None,
alpha: float = 1.0,
tooltip: Union[List[str], tooltipList, None] = None,
**kwargs
) -> alt.Chart:
""" plots a scatter matrix
At the moment does not support neither histogram nor kde;
Uses f-f scatterplots instead. Interactive and with a cusotmizable
tooltip
Parameters
----------
df : DataFame
DataFame to be used for scatterplot. Only numeric columns will be included.
color : string [optional]
Can be a column name or specific color value (hex, webcolors).
alpha : float
Opacity of the markers, within [0,1]
tooltip: list [optional]
List of specific column names or alt.Tooltip objects. If none (default),
will show all columns.
"""
dfc = _preprocess_data(df)
tooltip = _process_tooltip(tooltip) or dfc.columns.tolist()
cols = dfc._get_numeric_data().columns.tolist()
chart = (
alt.Chart(dfc)
.mark_circle()
.encode(
x=alt.X(alt.repeat("column"), type="quantitative"),
y=alt.X(alt.repeat("row"), type="quantitative"),
opacity=alt.value(alpha),
tooltip=tooltip,
)
.properties(width=150, height=150)
)
if color:
color = str(color)
if color in dfc:
color = alt.Color(color)
if "colormap" in kwargs:
color.scale = alt.Scale(scheme=kwargs.get("colormap"))
else:
color = alt.value(color)
chart = chart.encode(color=color)
return chart.repeat(row=cols, column=cols).interactive()
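# Illustrative usage sketch (not part of the module); the DataFrame and
# column names below are hypothetical.
#
# import pandas as pd
# df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "species": ["x", "y", "z"]})
# chart = scatter_matrix(df, color="species", alpha=0.7)
# chart.save("scatter_matrix.html")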
|
186989
|
import extractInfo
import detectPhishing
import csv
import tkinter as tk
def handleClick(username,password):
Status=[]
#Files=extractInfo.extractFromEmail('<EMAIL>','<PASSWORD>')
Files=extractInfo.extractFromEmail(username,password)
extractInfo.extract_URL_From_Sub(Files)
if Files:
with open('Data/mailout.txt', mode='r') as infile:
reader = csv.reader(infile)
for row in reader:
print(row)
with open("Data/Status.txt", "a+") as fh_out:
res=detectPhishing.validateURL(row)
Status.append(res)
fh_out.write(res+'\n')
fh_out.close()
window=tk.Tk()
l1=tk.Label(window,text='Email-Id:')
l2=tk.Label(window,text='Password')
t1=tk.Entry(window,textvariable=tk.StringVar())
t2=tk.Entry(window,show="*",textvariable=tk.StringVar())
print('$$$$$$$$$$$$$$$$$$$$$$$$$$'+t1.get())
b1=tk.Button(window,text="Check",command=lambda: handleClick(t1.get(),t2.get()))
l1.grid(row=0,column=0)
t1.grid(row=0,column=1)
l2.grid(row=1,column=0)
t2.grid(row=1,column=1)
b1.grid(row=2,column=1)
window.mainloop()
|
187037
|
from __future__ import print_function
import io
import os.path
import pickle
import sys
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
TOKEN = '<PASSWORD>/token.pickle'
DOWNLOAD_LINK = "travis_files/download_link.txt"
class GoogleDriveService:
def __init__(self):
self.creds = self._auth()
self.google_drive_service = build('drive', 'v3', credentials=self.creds)
def _auth(self):
"""
Authenticate google drive credentails.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOKEN):
with open(TOKEN, 'rb') as token:
creds = pickle.load(token)
else:
sys.exit("can't find token.pickle")
return creds
def download_result(self, file_id, file_name):
file = self.google_drive_service.files().get(fileId=file_id, fields='webContentLink').execute()
print('Download test result at: %s' % file['webContentLink'])
file_request = self.google_drive_service.files().get_media(fileId=file_id)
if file_request:
# there are results
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, file_request)
done = False
            while not done:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
with io.open('travis_files/%s' % file_name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
            # extract the boolean from the file name (format: pr,sha,date,passed.html)
            passed = file_name.split(',')[3].split('.')[0] == "True"
return passed
else:
sys.exit('ERROR download file: %s with id: %s' % (file_id, file_name))
def find_file_id(self, sha, started_time):
"""
Looking for the commit id in google drive
:param sha: the commit id to look for.
:return: The id and the file name as in google drive.
"""
results = self.google_drive_service.files().list(
fields="nextPageToken, files(id, name)").execute()
files = results.get('files', [])
# looking for the file with that sha(file format: pr,sha,date,passed.html)
for file in files:
if sha in file['name'] and started_time in file['name']:
return file
return False
|
187038
|
from __future__ import print_function
import math
def simple():
'''
3.14 * 0.5 = 1.57
1.57 / 40 = 0.03925
take half => 0.019625
20 mil diameter pads
rond
6 mil fab rule?
20 mil hole, 10 mil ring each side
should be fine
eh pretty tight
lets shrink slightly
'''
D = 0.5
R = D / 2
PINS = 40
    for i in range(PINS):
pin = i + 1
angle = (i + 0.5) * 2 * math.pi / PINS
if angle > 2 * math.pi:
angle -= 2 * math.pi
angle = -angle
x = R * math.sin(angle)
y = R * math.cos(angle)
        print('% 3d: % 4d x % 4d y % 5d r' % (pin, 1000 * x, 1000 * y, -360 * angle / 3.14 / 2))
'''
autogen
pcad.lia simple
.dxf may also work
figured out how to import .lia
do that
OSH rules
https://oshpark.com/guidelines
6 mil minimum trace width
6 mil minimum spacing
at least 15 mil clearances from traces to the edge of the board
13 mil minimum drill size
7 mil minimum annular ring
pcbway rules
http://www.pcbway.com/capabilities.html
drill size
Min drill size is 0.2mm,
7.9 mil
max drill is 6.3mm.
Any holes greater than 6.3mm or smaller than 0.3mm will be subject to extra charges.
6.3: 248 mil
0.3: 11.8 mil
Min Width of Annular Ring
0.15mm(6mil)
Minimum Diameter of Plated Half Holes
0.6mm
eeeeh
that messes up what I'm trying to do
23.6 mil
actually maybe its okay
so in summary
current
hole: 13
annular ring: 7
net size: 27
move to
hole: 12
annular ring: 6
net size: 24
eliminated most but not all
need 1 more mil
hole 12
0.5 * 3.14159 = 1.570795
1.570795 / 28 = 0.056099821
56 mil
6 mil spacing min
7 mil minimum annular ring
0.1" fencepost ref
40
68
9 mil ring
say 9 mil spacing
9 mil ring
56 - 9 - 2 * 9 = 29 hole
wait no hole
just do it evenly
56/2 = 28
'''
def header():
return '''\
ACCEL_ASCII "POINTS.LIA"
(asciiHeader
(asciiVersion 3 0)
(timeStamp 2017 1 7 0 54 1)
(program "points.py" "1.0.0")
(copyright "points.py")
(headerString "")
(fileUnits Mil)
(guidString "{00000000-0000-0000-0000-000000000000}")
)
(library "Library_1"
(padStyleDef "(Default)"
(holeDiam 30mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerNumRef 2) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "EF20X60TOP1"
(holeDiam 0mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 20mil) (shapeHeight 60mil) )
(padShape (layerNumRef 2) (padShapeType Ellipse) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "P:EX30Y30D201"
(holeDiam 12mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerNumRef 2) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "RECT28"
(holeDiam 0mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Rect) (shapeWidth 28mil) (shapeHeight 28mil) )
(padShape (layerNumRef 2) (padShapeType Ellipse) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Signal) (padShapeType Rect) (shapeWidth 28mil) (shapeHeight 28mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(viaStyleDef "(Default)"
(holeDiam 28mil)
(startRange 1)
(endRange 2)
(viaShape (layerNumRef 1) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerNumRef 2) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerType Signal) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerType Plane) (viaShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(viaShape (layerType NonSignal) (viaShapeType Ellipse) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(textStyleDef "(Default)"
(font
(fontType Stroke)
(fontFamily Modern)
(fontFace "Quality")
(fontHeight 80mil)
(strokeWidth 10mil)
)
(textStyleAllowTType False)
(textStyleDisplayTType False)
)
(textStyleDef "(DefaultTTF)"
(font
(fontType Stroke)
(fontFamily SanSerif)
(fontFace "QUALITY")
(fontHeight 100.0)
(strokeWidth 10.0)
)
(font
(fontType TrueType)
(fontFamily Modern)
(fontFace "Arial")
(fontHeight 125.0)
(strokeWidth 0.19843 mm)
(fontWeight 400)
(fontCharSet 0)
(fontOutPrecision 7)
(fontClipPrecision 32)
(fontQuality 1)
(fontPitchAndFamily 6)
)
(textStyleAllowTType True)
(textStyleDisplayTType True)
)
(patternDefExtended "ROUND40-0.5_1"
(originalName "ROUND40-0.5")
(patternGraphicsNameRef "Primary")
(patternGraphicsDef
(patternGraphicsNameDef "Primary")
(multiLayer
'''
def footer(pins):
s = '''\
)
(layerContents (layerNumRef 10)
(arc (pt 0mil 0mil) (radius 250mil) (startAngle 0.0) (sweepAngle 360.0) (width 10mil) )
)
(layerContents (layerNumRef 6)
(attr "RefDes" "" (pt -266.767mil 294.091mil) (isVisible True) (textStyleRef "(Default)") )
(attr "Type" "" (pt -266.767mil -389mil) (isVisible True) (textStyleRef "(Default)") )
)
)
)
(compDef "ROUND40-0.5_1"
(originalName "ROUND40-0.5")
(compHeader
(sourceLibrary "")
(numPins 40)
(numParts 1)
(alts (ieeeAlt False) (deMorganAlt False))
(refDesPrefix "")
)
'''
    for i in range(pins):
'''
(compPin "1" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
(compPin "2" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
(compPin "3" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
...
(compPin "40" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
'''
pin = i + 1
s += ' (compPin "%d" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )\n' % (pin,)
s += '''\
(attachedPattern (patternNum 1) (patternName "ROUND40-0.5")
(numPads 40)
(padPinMap
'''
    for i in range(pins):
'''
(padNum 1) (compPinRef "1")
(padNum 2) (compPinRef "2")
(padNum 3) (compPinRef "3")
...
(padNum 40) (compPinRef "40")
'''
pin = i + 1
s += ' (padNum %d) (compPinRef "%d")\n' % (pin, pin)
s += '''\
)
)
)
)
'''
return s
def auto(PINS=40, D=0.5, padStyleRef="EF20X60TOP1"):
R = D / 2
s = header()
    for i in range(PINS):
pin = i + 1
angler = (i + 0.5) * 2 * math.pi / PINS
angler = -angler
angled = angler * 180 / math.pi
x = R * math.sin(angler)
y = R * math.cos(angler)
'''
(pad (padNum 1) (padStyleRef "EF20X60TOP1") (pt -19mil 249mil) (rotation 4.0)(defaultPinDes "1"))
(pad (padNum 2) (padStyleRef "EF20X60TOP1") (pt -58mil 243mil) (rotation 13.0)(defaultPinDes "2"))
(pad (padNum 3) (padStyleRef "EF20X60TOP1") (pt -95mil 230mil) (rotation 22.0)(defaultPinDes "3"))
'''
# Rotation CW
# Need to counter above rotation
l = ' (pad (padNum %d) (padStyleRef "%s") (pt %dmil %dmil) (rotation %0.1f)(defaultPinDes "%d"))\n' % (pin, padStyleRef, 1000 * x, 1000 * y, -angled, pin)
s += l
s += footer(PINS)
    print(s)
# Original design
# Elongated oval pad sliced in the middle
#auto(PINS=40, D=0.5, padStyleRef="EF20X60TOP1")
# castillation
#auto(PINS=40, D=0.5, padStyleRef="P:EX30Y30D201")
# diameter
# pad size/2
# solder mask expansion
# edge clearance
#auto(PINS=28, D=0.5+0.056/2+2*0.004+2*0.006, padStyleRef="RECT28")
auto(PINS=28, D=0.45+0.056/2+2*0.004+2*0.006, padStyleRef="RECT28")
|
187096
|
import os
import chainer
from chainer import training
from chainer.training import extensions
from qanta.buzzer.nets import RNNBuzzer, MLPBuzzer, LinearBuzzer
from qanta.buzzer.util import read_data, convert_seq
from qanta.util.constants import BUZZER_TRAIN_FOLD, BUZZER_DEV_FOLD
def main(model):
train = read_data(fold=BUZZER_TRAIN_FOLD)
valid = read_data(fold=BUZZER_DEV_FOLD)
print("# train data: {}".format(len(train)))
print("# valid data: {}".format(len(valid)))
train_iter = chainer.iterators.SerialIterator(train, 64)
valid_iter = chainer.iterators.SerialIterator(
valid, 64, repeat=False, shuffle=False
)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=convert_seq, device=0
)
trainer = training.Trainer(updater, (20, "epoch"), out=model.model_dir)
trainer.extend(
extensions.Evaluator(valid_iter, model, converter=convert_seq, device=0)
)
record_trigger = training.triggers.MaxValueTrigger(
"validation/main/accuracy", (1, "epoch")
)
trainer.extend(
extensions.snapshot_object(model, "buzzer.npz"), trigger=record_trigger
)
trainer.extend(extensions.LogReport())
trainer.extend(extensions.ProgressBar())
trainer.extend(
extensions.PrintReport(
[
"epoch",
"main/loss",
"validation/main/loss",
"main/accuracy",
"validation/main/accuracy",
"elapsed_time",
]
)
)
if not os.path.isdir(model.model_dir):
os.mkdir(model.model_dir)
trainer.run()
if __name__ == "__main__":
# model = LinearBuzzer(n_input=22, n_layers=1, n_hidden=50, n_output=2, dropout=0.4)
# model = MLPBuzzer(n_input=22, n_layers=1, n_hidden=50, n_output=2, dropout=0.4)
model = RNNBuzzer(n_input=22, n_layers=1, n_hidden=50, n_output=2, dropout=0.4)
chainer.backends.cuda.get_device_from_id(0).use()
model.to_gpu()
main(model)
|
187119
|
class DummyScheduler(object):
def __init__(self, optimizer):
pass
def step(self):
pass
|
187121
|
from ucate.application.workflows.tarnet import train as train_tarnet
from ucate.application.workflows.tlearner import train as train_tlearner
from ucate.application.workflows.cevae import train as train_cevae
from ucate.application.workflows.evaluation import evaluate
from ucate.application.workflows.evaluation import summarize
from ucate.application.workflows.evaluation import build_summary
|
187128
|
from __future__ import print_function
import unittest
import qnet
import qnetu
import numpy
import numpy.random
import mytime
import netutils
import estimation
import yaml
import arrivals
import sampling
import qstats
import sys
class TestLikDelta (unittest.TestCase):
def test_mm1_delta (self):
sampling.set_seed(68310)
nreps = 10
ntasks = 10
pct = 0.5
net = self.mm1
self.do_test_delta_internal (net, nreps, ntasks, pct)
def test_mmk_delta (self):
sampling.set_seed(68310)
nreps = 10
ntasks = 10
pct = 0.5
net = self.mmk
self.do_test_delta_internal (net, nreps, ntasks, pct)
def test_mmrss_delta (self):
sampling.set_seed(68310)
nreps = 10
ntasks = 100
pct = 0.25
net = self.mmrss
self.do_test_delta_internal (net, nreps, ntasks, pct)
def do_test_delta_internal (self, net, nreps, ntasks, pct):
for ri in range(nreps):
arrv = net.sample (ntasks)
obs = arrv.subset_by_task (pct)
samples = net.slice_resample (obs, 0, 5)
arrv_from = samples[len(samples)-1]
print "Computing LIK0"
lik0 = net.log_prob (arrv_from)
for e in arrv_from:
if not e.obs_d:
# print "Testing evt ", e
dfn = qnet.GGkGibbs(net, arrv_from, e, lik0).dfn()
d0 = e.d
d_test = [ d0+delta for delta in [ -0.5, -0.1, 0.1, 0.5, 1.0, 1.5, 3.0 ] ]
for d1 in d_test:
# print "Testing departure ", d1
lik_incremental = dfn(d1)
if numpy.isinf (lik_incremental): continue # probably right
lik_true = self.compute_full_lik (net, arrv_from, e, d1)
print "%d %.4f %.4f %.4f %.4f" % (e.eid, d0, d1, lik_incremental, lik_true)
if numpy.isinf(lik_true):
self.assertTrue (numpy.isinf(lik_incremental))
else:
                            self.assertAlmostEqual(lik_true, lik_incremental, 5)
def compute_full_lik (self, net, arrv0, evt, d):
arrv = arrv0.duplicate()
dl0 = evt.queue().pyDiffListForDeparture (evt, d)
evt_next = evt.next_by_task()
if evt_next:
dl0.extend (evt_next.queue().pyDiffListForArrival(evt_next, d))
arrv.applyDiffList (dl0, 0)
return net.log_prob (arrv)
mm1_text = """
states:
- name: INITIAL
queues: [ INITIAL ]
successors: [ TIER1 ]
initial: TRUE
- name: TIER1
queues: [ WEB1, WEB2 ]
successors: [ TIER2 ]
- name: TIER2
queues: [ APP1 ]
queues:
- { name: INITIAL, service: [M, 10.0] }
- { name: WEB1, service: [M, 3.0] }
- { name: WEB2, service: [M, 3.0] }
- { name: APP1, service: [M, 8.0] }
"""
mmk_text = """
states:
- name: INITIAL
queues: [ INITIAL ]
successors: [ TIER1 ]
initial: TRUE
- name: TIER1
queues: [ WEB1, WEB2 ]
successors: [ TIER2 ]
- name: TIER2
queues: [ APP1 ]
queues:
- { name: INITIAL, service: [M, 5.0] }
- { name: WEB1, service: [M, 3.0], processors: 3 }
- { name: WEB2, service: [M, 3.0], processors: 4 }
- { name: APP1, service: [M, 8.0], processors: 2 }
"""
mmrss_text = """
states:
- name: INITIAL
queues: [ INITIAL ]
successors: [ TIER1 ]
initial: TRUE
- name: TIER1
queues: [ WEB1, WEB2 ]
successors: [ TIER2 ]
- name: TIER2
queues: [ APP1, APP2 ]
successors: [ TIER3 ]
- name: TIER3
queues: [ DB1, DB2 ]
queues:
- { name: INITIAL, service: [M, 10.0] }
- { name: WEB1, service: [M, 15.0], type: GG1R }
- { name: WEB2, service: [M, 17.0], type: GG1R }
- { name: APP1, service: [M, 10.0], type: GG1R }
- { name: APP2, service: [M, 10.0], type: GG1R }
- { name: DB1, service: [M, 7.0], type: GG1R }
- { name: DB2, service: [M, 7.0], type: GG1R }
"""
def setUp (self):
self.mm1 = qnetu.qnet_from_text (TestLikDelta.mm1_text)
self.mmk = qnetu.qnet_from_text (TestLikDelta.mmk_text)
self.mmrss = qnetu.qnet_from_text (TestLikDelta.mmrss_text)
def main():
if len(sys.argv) > 1:
for test_name in sys.argv[1:]:
suite = unittest.TestLoader().loadTestsFromName("test_likdelta.TestLikDelta.%s" % (test_name,))
unittest.TextTestRunner(verbosity=2).run(suite)
else:
unittest.main()
if __name__ == "__main__":
main()
|
187193
|
from __future__ import print_function
import nltk
import codecs
import os
import numpy as np
from collections import defaultdict
def data_directory():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
stemmer = nltk.stem.PorterStemmer()
manual_dir = os.path.join(data_directory(), 'manual_keywords_merged')
eval_dir = os.path.join(data_directory(), 'keywords_eval_dir')
methods = ['tfidf_nodruid_nofilter_nostopwords', 'tfidf_nodruid_nofilter', 'tfidf_nodruid', 'tfidf', 'habibi75', 'habibi75_prep', 'proposed','proposed_d05','proposed_d07','proposed_nodruid','habibi75_orig_prep','proposed_orig', 'proposed_orig_d05', 'proposed_orig_d07', 'proposed_orig_nodruid']
#methods = ['habibi75', 'habibi75_orig', 'habibi75_prep', 'habibi75_orig_prep']
pretty_method_names = {
'tfidf_nodruid_nofilter_nostopwords' : 'TF-IDF baseline, no multiwords, no filtering',
'tfidf_nodruid_nofilter' : 'TF-IDF baseline, no multiwords, only stopword filtering',
'tfidf_nodruid' : 'TF-IDF baseline, no multiwords, full filtering',
'tfidf' : 'TF-IDF baseline, with DRUID multiwords, full filtering',
'tfidf_orig' : 'TF-IDF baseline on gold transcriptions, with DRUID multiwords, full filtering',
'habibi75' : 'Habibi and PB',
'habibi75_orig' : 'Habibi and PB, gold transcriptions',
'habibi75_prep' : 'Habibi and PB, our preprocessing',
'proposed_nodruid' : 'Our proposed method, without DRUID multiwords',
'proposed' : 'Our proposed method, with DRUID multiwords (c=0.3)',
'proposed_d05' : 'Our proposed method, with DRUID multiwords (c=0.5)',
'proposed_d07' :'Our proposed method, with DRUID multiwords (c=0.7)',
'habibi75_orig_prep' : 'Habibi and PB, our preprocessing, gold transcriptions',
'proposed_orig' : 'Our proposed method on gold transcriptions, with DRUID multiwords (c=0.3)',
'proposed_orig_d05' : 'Our proposed method on gold transcriptions, with DRUID multiwords (c=0.5)',
'proposed_orig_d07' : 'Our proposed method on gold transcriptions, with DRUID multiwords (c=0.7)',
'proposed_orig_nodruid' : 'Our proposed method on gold transcriptions, without DRUID multiwords'
}
filenames = []
def remove_line_end(line):
if line[-1] == '\n':
line = line[:-1]
return line
filelist = []
gold_standards = {}
for myfile in os.listdir(manual_dir):
if myfile.endswith('gold_keywords.txt'):
manual_tokens = []
habibi_div_tokens = []
habibi_no_div_tokens = []
our_method_tokens = []
tfidf_tokens = []
with codecs.open(os.path.join(manual_dir, myfile), 'r', encoding='utf-8', errors='replace') as in_file:
            print('Processing', myfile, ':')
# Manual words are already separated
gold_standard = [remove_line_end(line).strip() for line in in_file]
raw_file = '.'.join(myfile.split('.')[:-2])
filelist.append(raw_file)
gold_standards[raw_file] = gold_standard
print('Evaluating with these files:')
print(filelist)
def eval_file(method_dir, raw_file, gold_standard, tolerated):
method_tokens = []
method_dir = os.path.join(eval_dir, method_dir)
    print('Opening file:', os.path.join(method_dir, raw_file))
with codecs.open(os.path.join(method_dir, raw_file), 'r', encoding='utf-8', errors='replace') as in_file:
for line in in_file:
method_tokens += remove_line_end(line).split()
if len(method_tokens)>10:
method_tokens = method_tokens[:10]
method_tokens_stemmed = [stemmer.stem(token) for token in method_tokens]
gold_standard_stemmed = [stemmer.stem(token) for token in gold_standard]
tolerated_stemmed = [stemmer.stem(token) for token in tolerated]
    print('==========' + method_dir + '==========')
    print('tokens:', method_tokens, method_tokens_stemmed)
    print('gold:', gold_standard, gold_standard_stemmed)
    print('tolerated:', tolerated, tolerated_stemmed)
recall = len(list(set(method_tokens_stemmed) & set(gold_standard_stemmed))) / float(len(gold_standard_stemmed))
precision = len(list(set(method_tokens_stemmed) & set(gold_standard_stemmed))) / float(len(method_tokens_stemmed))
hrr = len(list(set(method_tokens_stemmed) - set(gold_standard_stemmed) - set(tolerated_stemmed))) / float(len(method_tokens_stemmed))
    print('Recall:', len(list(set(method_tokens_stemmed) & set(gold_standard_stemmed))), '/', len(gold_standard_stemmed), '=', recall)
    print('HRR:', len(list(set(method_tokens_stemmed) - set(gold_standard_stemmed) - set(tolerated_stemmed))), '/', len(method_tokens_stemmed), '=', hrr)
return recall,precision,hrr
recalls = defaultdict(list)
precs = defaultdict(list)
hrrs = defaultdict(list)
for myfile in filelist:
tolerated_file = myfile + '.tolerated.txt'
with codecs.open(os.path.join(manual_dir, tolerated_file), 'r', encoding='utf-8', errors='replace') as in_file:
        print('Processing', tolerated_file, ':')
# Manual words are already separated
tolerated = [remove_line_end(line).strip() for line in in_file]
for method in methods:
recall,precision,hrr = eval_file(method, myfile, gold_standards[myfile], tolerated)
recalls[method].append(recall)
precs[method].append(precision)
hrrs[method].append(hrr)
for i,key in enumerate(methods):
#print "-----------------Final Scores-----------------"
#print "Method:", "Avg. Recall (Std. Dev.),", "Avg. Precision (Std. Dev.)," , "Avg. HRR (Std. Dev.),", "Avg. Recall - Avg. HRR"
recall = sum(recalls[key]) / len(recalls[key])
recall_std = np.std(recalls[key])
precision = sum(precs[key]) / len(precs[key])
precision_std = np.std(precs[key])
hrr = sum(hrrs[key]) / len(hrrs[key])
hrr_std = np.std(hrrs[key])
difference = recall - hrr
#print key, '%0.4f' % recall, "(", '%0.4f' % recall_std, "), ", '%0.4f' % precision, "(", '%0.4f' % precision_std, "), ", \
#'%0.4f' % hrr, "(", '%0.4f' % hrr_std, "), ", '%0.4f' % difference
#print 'latex:'
    print('(%i)' % (i+1), pretty_method_names[key], '&', ('%0.2f' % (recall*100.0)) + '\\%', ' (' + ('%0.2f' % (recall_std*100.0)) + '\\%' + ')', '&', ('%0.2f' % (precision*100.0)), \
          '\\% (' + ('%0.2f' % (precision_std*100.0)) + '\\%' + ')', '&', ('%0.2f' % (hrr*100.0)) + '\\%', '(' + ('%0.2f' % (hrr_std*100.0)) + '\\%' + ')', '& (NDCG here)', '\\\\ \\hline')
|
187195
|
def drive(start, end, step, parameters):
step_results = {
"P:sir.out.S": list(),
"P:sir.out.I": list(),
"P:sir.out.R": list(),
"P:sir.in.dt": list(),
}
S = parameters["P:sir.in.S"]
I = parameters["P:sir.in.I"]
R = parameters["P:sir.in.R"]
for i in range(start, end + 1, step):
(S, I, R) = sir(
S,
I,
R,
parameters["P:sir.in.beta"],
parameters["P:sir.in.gamma"],
step,
)
step_results["P:sir.out.S"].append(S)
step_results["P:sir.out.I"].append(I)
step_results["P:sir.out.R"].append(R)
step_results["P:sir.in.dt"].append(i)
return step_results
"""
Derived from the following:
********************************************************************************
! Input Variables:
! S Amount of susceptible members at the current timestep
! I Amount of infected members at the current timestep
! R Amount of recovered members at the current timestep
! beta Rate of transmission via contact
! gamma Rate of recovery from infection
! dt Next inter-event time
!
! State Variables:
! infected Increase in infected at the current timestep
! recovered Increase in recovered at the current timestep
********************************************************************************
subroutine sir(S, I, R, beta, gamma, dt)
implicit none
double precision S, I, R, beta, gamma, dt
double precision infected, recovered
infected = ((beta*S*I) / (S + I + R)) * dt
recovered = (gamma*I) * dt
S = S - infected
I = I + infected - recovered
R = R + recovered
end subroutine sir
"""
def sir(S: float, I: float, R: float, beta: float, gamma: float, dt: float):
"""
! Input Variables:
! S Amount of susceptible members at the current timestep
! I Amount of infected members at the current timestep
! R Amount of recovered members at the current timestep
! beta Rate of transmission via contact
! gamma Rate of recovery from infection
! dt Next inter-event time
!
! State Variables:
! infected Increase in infected at the current timestep
! recovered Increase in recovered at the current timestep
"""
infected = ((beta * S * I) / (S + I + R)) * dt
recovered = (gamma * I) * dt
S = S - infected
I = I + infected - recovered
R = R + recovered
return (S, I, R)
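# Illustrative run of the driver (all parameter values below are hypothetical):
if __name__ == "__main__":
    params = {
        "P:sir.in.S": 990.0,
        "P:sir.in.I": 10.0,
        "P:sir.in.R": 0.0,
        "P:sir.in.beta": 0.5,
        "P:sir.in.gamma": 0.1,
    }
    results = drive(0, 100, 1, params)
    print("final infected:", results["P:sir.out.I"][-1])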
|
187213
|
from binascii import hexlify
import getpass
import logging
import os
import paramiko
def agent_auth(transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
"""
agent = paramiko.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
logging.getLogger('ssh_client').debug('no agent keys found!')
return
for key in agent_keys:
logging.getLogger('ssh_client').debug('Trying ssh-agent key %s' % hexlify(key.get_fingerprint()))
try:
transport.auth_publickey(username, key)
logging.getLogger('ssh_client').debug('... authentication success!')
return
except paramiko.SSHException:
logging.getLogger('ssh_client').debug('authentication fail.')
class KeyAuthAction(object):
def __init__(self, session, transport, key_file, key_type, username, next_action = None, password = None):
self.session = session
self.transport = transport
self.key_file = key_file
self.key_type = key_type
self.username = username
self.next_action = next_action
self.password = password
def execute(self):
key = None
try:
if self.password:
if self.key_type == 'RSA':
key = paramiko.RSAKey.from_private_key_file(self.key_file, self.password)
else:
key = paramiko.DSSKey.from_private_key_file(self.key_file, self.password)
else:
if self.key_type == 'RSA':
key = paramiko.RSAKey.from_private_key_file(self.key_file)
else:
key = paramiko.DSSKey.from_private_key_file(self.key_file)
        except Exception:
self.session.prompt_password(self)
return
if key:
try:
self.transport.auth_publickey(self.username, key)
except paramiko.SSHException:
pass
self._post_execute()
def _post_execute(self):
if self.transport.is_authenticated():
self.session.interactive_shell(self.transport)
elif self.next_action:
self.next_action.execute()
else:
self.session.report_error('Authentication failed.')
def get_pass_desc(self):
return "key file " + self.key_file + " 's password:"
class PromptLoginAction(object):
def __init__(self, session, transport, username):
self.session = session
self.transport = transport
self.username = username
def execute(self):
self.session.prompt_login(self.transport, self.username)
def get_user_ssh_dir():
ssh_dir = os.path.expanduser('~/.ssh')
if os.path.isdir(ssh_dir):
return ssh_dir
ssh_dir = os.path.expanduser('~/ssh')
return ssh_dir if os.path.isdir(ssh_dir) else None
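# build_auth_actions chains the available authentication attempts into a simple
# chain of responsibility: each KeyAuthAction tries one private key and
# delegates to its next_action on failure, falling back to an interactive
# PromptLoginAction at the end of the chain.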
def build_auth_actions(session, t, username):
key_files = {'id_rsa':'RSA', 'id_dsa':'DSS'}
root_action = None
cur_action = None
ssh_dir = get_user_ssh_dir()
for key_file in key_files:
if not ssh_dir:
break
path = os.path.join(ssh_dir, key_file)
if not os.path.exists(path):
continue
action = KeyAuthAction(session, t, path, key_files[key_file], username)
if cur_action:
cur_action.next_action = action
else:
root_action = cur_action = action
cur_action = action
action = PromptLoginAction(session, t, username)
if cur_action:
cur_action.next_action = action
else:
root_action = action
return root_action
def start_client(session, cfg):
username = cfg.username
hostname = cfg.hostname
try:
sock = session._connect()
if not sock:
return
t = paramiko.Transport(sock)
try:
t.start_client()
except paramiko.SSHException:
session.report_error('*** SSH negotiation failed.')
return
ssh_dir = get_user_ssh_dir()
if ssh_dir:
try:
keys_file = os.path.join(ssh_dir, 'known_hosts')
keys = paramiko.util.load_host_keys(keys_file) if os.path.isfile(keys_file) else {}
except IOError:
logging.getLogger('ssh_client').exception('unable to open host keys file:{}'.format(os.path.join(ssh_dir, 'known_hosts')))
session.report_error('*** Unable to open host keys file')
keys = {}
else:
keys = {}
# check server's host key -- this is important.
key = t.get_remote_server_key()
        if hostname not in keys:
            logging.getLogger('ssh_client').warning('*** WARNING: Unknown host key, hostname is not in the list!')
        elif key.get_name() not in keys[hostname]:
            logging.getLogger('ssh_client').warning('*** WARNING: Unknown host key, key is not found in host keys!')
        elif keys[hostname][key.get_name()] != key:
            logging.getLogger('ssh_client').warning('*** WARNING: Host key has changed!!!')
# get username
if username == '':
username = getpass.getuser()
agent_auth(t, username)
if not t.is_authenticated():
if cfg.password:
try:
t.auth_password(username, cfg.password)
except paramiko.SSHException:
pass
if not t.is_authenticated():
action = build_auth_actions(session, t, username)
action.execute()
return
if not t.is_authenticated():
session.report_error('Authentication failed.')
t.close()
return
session.interactive_shell(t)
except Exception as e:
logging.getLogger('ssh_client').exception('ssh client caught exception:')
session.report_error(str(e))
try:
t.close()
        except Exception:
pass
class PassAuthAction(KeyAuthAction):
def __init__(self, session, transport, username, password):
super(PassAuthAction, self).__init__(session, transport, None, None, username, None, password)
self.prompt_pass = True
def execute(self):
try:
self.transport.auth_password(self.username, self.password)
except paramiko.SSHException:
pass
if not self.transport.is_authenticated() and self.prompt_pass:
self.session.prompt_password(self)
return
self._post_execute()
def get_pass_desc(self):
        return self.username + "'s password:"
def try_login(session, t, key_file, key_type, username, password):
root_action = None
if os.path.exists(key_file):
root_action = KeyAuthAction(session, t, key_file, key_type, username)
action = PassAuthAction(session, t, username, password)
action.prompt_pass = False
if root_action:
root_action.next_action = action
else:
root_action = action
root_action.execute()
|
187271
|
import argparse
import configparser
import os
import sys
sys.path.append('..')
sys.path.append('../..') # if running in this folder
from vimms.Common import IN_SILICO_OPTIMISE_TOPN, load_obj, add_log_file, IN_SILICO_OPTIMISE_SMART_ROI, \
IN_SILICO_OPTIMISE_WEIGHTED_DEW
from vimms.InSilicoSimulation import extract_chemicals, get_timing, extract_timing, run_TopN, run_SmartROI, \
run_WeightedDEW, extract_boxes, evaluate_boxes_as_dict, evaluate_boxes_as_array, save_counts, string_to_list, \
plot_counts
class InSilicoSimulator(object):
def __init__(self, sample_name, seed_file, out_dir, controller_name, config_parser):
self.sample_name = sample_name
self.seed_file = seed_file
self.out_dir = out_dir
self.controller_name = controller_name
self.config_parser = config_parser
def run(self):
# get the chemicals, timing, peak sample object and parameters
chems = self.get_chems()
scan_duration = self.get_scan_duration()
params = self.get_params()
# simulate controller and evaluate performance
self.simulate(chems, scan_duration, params)
self.evaluate(params)
def get_chems(self):
# extract chemicals from seed_file
params_dict = {
'mz_tol': self.config_parser.getint('roi_extraction', 'mz_tol'),
'mz_units': self.config_parser.get('roi_extraction', 'mz_units'),
'min_length': self.config_parser.getint('roi_extraction', 'min_length'),
'min_intensity': self.config_parser.getint('roi_extraction', 'min_intensity'),
'start_rt': self.config_parser.getint('experiment', 'min_rt'),
'stop_rt': self.config_parser.getint('experiment', 'max_rt')
}
chems = extract_chemicals(self.seed_file, params_dict)
return chems
def get_scan_duration(self):
# if provided, read timing information from config
# otherwise extract timing from the seed file too
# parse time dict, this really should be computed from the data
time_dict_str = self.config_parser.get('simulation', 'scan_duration')
time_dict = get_timing(time_dict_str) if len(time_dict_str) > 0 else extract_timing(self.seed_file)
return time_dict
    def get_params(self):
        raise NotImplementedError()
    def simulate(self, chems, scan_duration, params):
        raise NotImplementedError()
    def evaluate(self, params):
        raise NotImplementedError()
class TopNSimulator(InSilicoSimulator):
def get_params(self):
# get experiment parameters
ionisation_mode = self.config_parser.get('experiment', 'ionisation_mode')
isolation_width = self.config_parser.getfloat('experiment', 'isolation_width')
min_rt = self.config_parser.getfloat('experiment', 'min_rt')
max_rt = self.config_parser.getint('experiment', 'max_rt')
# get simulation parameters
N = self.config_parser.getint('simulation', 'N')
mz_tol = self.config_parser.getint('simulation', 'mz_tol')
rt_tol = self.config_parser.getint('simulation', 'rt_tol')
min_ms1_intensity = self.config_parser.getint('simulation', 'min_ms1_intensity')
params = {
'controller_name': self.controller_name,
'ionisation_mode': ionisation_mode,
'sample_name': self.sample_name,
'isolation_width': isolation_width,
'N': N,
'mz_tol': mz_tol,
'rt_tol': rt_tol,
'min_ms1_intensity': min_ms1_intensity,
'min_rt': min_rt,
'max_rt': max_rt
}
return params
def simulate(self, chems, scan_duration, params):
run_TopN(chems, scan_duration, params, self.out_dir)
def evaluate(self, params):
xml_file = self.config_parser.get('evaluation', 'mzmine_xml_file')
mzmine_command = self.config_parser.get('evaluation', 'mzmine_command')
boxes = extract_boxes(self.seed_file, self.out_dir, mzmine_command, xml_file)
evaluate_boxes_as_dict(boxes, self.out_dir)
class SmartROISimulator(InSilicoSimulator):
def get_params(self):
# get experiment parameters
ionisation_mode = self.config_parser.get('experiment', 'ionisation_mode')
isolation_width = self.config_parser.getfloat('experiment', 'isolation_width')
min_rt = self.config_parser.getfloat('experiment', 'min_rt')
max_rt = self.config_parser.getint('experiment', 'max_rt')
# get simulation parameters
N = self.config_parser.getint('simulation', 'N')
mz_tol = self.config_parser.getint('simulation', 'mz_tol')
rt_tol = self.config_parser.getint('simulation', 'rt_tol')
min_ms1_intensity = self.config_parser.getint('simulation', 'min_ms1_intensity')
# get additional SmartROI parameters
iif_values = self.config_parser.get('simulation', 'iif_values')
dp_values = self.config_parser.get('simulation', 'dp_values')
iif_values = string_to_list(iif_values, convert=float)
dp_values = string_to_list(dp_values, convert=float)
min_roi_intensity = self.config_parser.getfloat('simulation', 'min_roi_intensity')
min_roi_length = self.config_parser.getint('simulation', 'min_roi_length')
min_roi_length_for_fragmentation = self.config_parser.getint('simulation', 'min_roi_length_for_fragmentation')
params = {
'controller_name': self.controller_name,
'ionisation_mode': ionisation_mode,
'sample_name': self.sample_name,
'isolation_width': isolation_width,
'N': N,
'mz_tol': mz_tol,
'rt_tol': rt_tol,
'min_ms1_intensity': min_ms1_intensity,
'min_rt': min_rt,
'max_rt': max_rt,
'iif_values': iif_values,
'dp_values': dp_values,
'min_roi_intensity': min_roi_intensity,
'min_roi_length': min_roi_length,
'min_roi_length_for_fragmentation': min_roi_length_for_fragmentation
}
return params
def simulate(self, chems, scan_duration, params):
run_SmartROI(chems, scan_duration, params, self.out_dir)
def evaluate(self, params):
# extract peak boxes
xml_file = self.config_parser.get('evaluation', 'mzmine_xml_file')
mzmine_command = self.config_parser.get('evaluation', 'mzmine_command')
boxes = extract_boxes(self.seed_file, self.out_dir, mzmine_command, xml_file)
# extract counts
pattern = 'SMART_{}_{}_{}.mzml'
yticks = params['iif_values']
xticks = params['dp_values']
counts = evaluate_boxes_as_array(boxes, self.out_dir, yticks, xticks, pattern, params)
save_counts(counts, self.out_dir, params['controller_name'], params['sample_name'])
# plot counts
xlabel = r'$\beta$'
ylabel = r'$\alpha$'
title = 'SmartROI simulations (%s)' % self.sample_name
out_file = os.path.join(self.out_dir, '%s_%s.png' % (self.controller_name, self.sample_name))
plot_counts(counts, out_file, title, xlabel, xticks, ylabel, yticks)
class WeightedDEWSimulator(InSilicoSimulator):
def get_params(self):
# get experiment parameters
ionisation_mode = self.config_parser.get('experiment', 'ionisation_mode')
isolation_width = self.config_parser.getfloat('experiment', 'isolation_width')
min_rt = self.config_parser.getfloat('experiment', 'min_rt')
max_rt = self.config_parser.getint('experiment', 'max_rt')
# get simulation parameters
N = self.config_parser.getint('simulation', 'N')
mz_tol = self.config_parser.getint('simulation', 'mz_tol')
min_ms1_intensity = self.config_parser.getint('simulation', 'min_ms1_intensity')
        # get additional WeightedDEW parameters
t0_values = self.config_parser.get('simulation', 't0_values')
rt_tol_values = self.config_parser.get('simulation', 'rt_tol_values')
t0_values = string_to_list(t0_values, convert=float)
rt_tol_values = string_to_list(rt_tol_values, convert=float)
params = {
'controller_name': self.controller_name,
'ionisation_mode': ionisation_mode,
'sample_name': self.sample_name,
'isolation_width': isolation_width,
'N': N,
'mz_tol': mz_tol,
'min_ms1_intensity': min_ms1_intensity,
'min_rt': min_rt,
'max_rt': max_rt,
't0_values': t0_values,
'rt_tol_values': rt_tol_values
}
return params
def simulate(self, chems, time_dict, params):
run_WeightedDEW(chems, time_dict, params, self.out_dir)
def evaluate(self, params):
# extract peak boxes
xml_file = self.config_parser.get('evaluation', 'mzmine_xml_file')
mzmine_command = self.config_parser.get('evaluation', 'mzmine_command')
boxes = extract_boxes(self.seed_file, self.out_dir, mzmine_command, xml_file)
# extract counts
pattern = 'WeightedDEW_{}_{}_{}.mzml'
yticks = params['t0_values']
xticks = params['rt_tol_values']
counts = evaluate_boxes_as_array(boxes, self.out_dir, yticks, xticks, pattern, params)
save_counts(counts, self.out_dir, params['controller_name'], params['sample_name'])
        xlabel = 'rt_tol'
        ylabel = 't0'
title = 'WeightedDEW simulations (%s)' % self.sample_name
out_file = os.path.join(self.out_dir, '%s_%s.png' % (self.controller_name, self.sample_name))
plot_counts(counts, out_file, title, xlabel, xticks, ylabel, yticks)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='In-silico Optimisation of Fragmentation Strategy using ViMMS')
parser.add_argument('sample_name', type=str)
parser.add_argument('seed_file', type=str)
parser.add_argument('out_dir', type=str)
parser.add_argument('config_file', type=str)
args = parser.parse_args()
# parse config file
config_parser = configparser.RawConfigParser()
config_file_path = args.config_file
config_parser.read(config_file_path)
# set output log file
controller_name = config_parser.get('experiment', 'controller_name')
log_file = '%s_%s.log' % (controller_name, args.sample_name)
log_path = os.path.join(args.out_dir, log_file)
add_log_file(log_path, 'INFO')
# run simulation here
sample_name = args.sample_name
seed_file = args.seed_file
out_dir = args.out_dir
choices = {
IN_SILICO_OPTIMISE_TOPN: TopNSimulator(sample_name, seed_file, out_dir, controller_name, config_parser),
IN_SILICO_OPTIMISE_SMART_ROI: SmartROISimulator(sample_name, seed_file, out_dir, controller_name,
config_parser),
IN_SILICO_OPTIMISE_WEIGHTED_DEW: WeightedDEWSimulator(sample_name, seed_file, out_dir, controller_name,
config_parser),
}
sim = choices[controller_name]
sim.run()
|
187273
|
from socialreaper.apis import ApiError
from socialreaper.iterators import Source, Iter, IterError
from instaphyte.api import InstagramAPI
class Instagram(Source):
def __init__(self):
super().__init__()
self.api = InstagramAPI()
class InstagramIter(Iter):
def __init__(self, node, function, count, response_key):
super().__init__()
self.node = node
self.function = function
self.max = count
self.response_key = response_key
self.max_id = None
def get_data(self):
self.page_count += 1
try:
self.response = self.function(self.node, self.max_id)
page = self.response['graphql'][self.response_key][
"edge_" + self.response_key + "_to_media"]
self.data = page["edges"]
self.max_id = page["page_info"]["end_cursor"]
if not self.max_id:
raise StopIteration
except ApiError as e:
raise IterError(e, vars(self))
def hashtag(self, tag, count=0):
return self.InstagramIter(tag, self.api.hashtag, count, "hashtag")
def location(self, tag, count=0):
return self.InstagramIter(tag, self.api.location, count, "location")
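# Usage sketch (hypothetical; requires network access to Instagram, and the
# count semantics are those of the Iter base class):
#   api = Instagram()
#   for post in api.hashtag("python", count=10):
#       print(post)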
|
187293
|
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
# allow module to be importable for --cover-inclusive
try:
from osrf_pycommon.process_utils.async_execute_process_trollius import From
except ImportError:
TROLLIUS_FOUND = False
else:
TROLLIUS_FOUND = True
from osrf_pycommon.process_utils.async_execute_process_trollius import Return
from .impl_aep_protocol import create_protocol
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield From(async_execute_process(
create_protocol(), cmd, **kwargs))
    retcode = yield From(protocol.complete)
raise Return(protocol.stdout_buffer, protocol.stderr_buffer,
retcode)
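# Usage sketch (assuming trollius is installed so From/Return are available):
#   stdout, stderr, retcode = loop.run_until_complete(run(['ls', '-l']))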
|
187311
|
from __future__ import division
import numpy as np
import math
def vectorAdd(v1, v2):
ans = (v1[0]+v2[0], v1[1]+v2[1], v1[2]+v2[2])
return ans
def vectorSum(vList):
ans = (0, 0, 0)
for v in vList:
ans = vectorAdd(ans, v)
return ans
def vectorCross(v1, v2):
v1 = list(v1)
v2 = list(v2)
ans = tuple(np.cross(v1,v2))
return ans
def vectorDot(v1, v2):
ans = v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
return ans
def vectorMultiplyC(v1, C):
ans = (v1[0]*C, v1[1]*C, v1[2]*C)
return ans
def vectorDividedC(v1, C):
ans = (float(v1[0])/C, float(v1[1])/C, float(v1[2])/C)
return ans
def pointsMean(pList):
sum_= vectorSum(pList)
ans = vectorDividedC(sum_, len(pList))
return ans
def pointsDistance(p1, p2):
vec = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
dis = math.sqrt(math.pow(vec[0], 2) +
math.pow(vec[1], 2) +
math.pow(vec[2], 2) )
return dis
def pointsDirection(p1, p2):
vec = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
scalar = float(np.linalg.norm(vec))
if not scalar==0:
ans = (vec[0]/scalar, vec[1]/scalar, vec[2]/scalar)
else:
ans = (vec[0], vec[1], vec[2])
return ans
def pointsDirectionPow(p1, p2, pow_):
vec = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
ans = (math.pow(vec[0],pow_), math.pow(vec[1],pow_),
math.pow(vec[2],pow_))
return ans
def pointsNormal(c, p1, p2):
vec1 = pointsDirection(c, p1)
vec2 = pointsDirection(c, p2)
normal = vectorCross(vec1, vec2)
return normal
def pointsSample(p1, p2, rate):
ans = [p1]
vec = pointsDirectionPow(p1, p2, 1)
step = vectorDividedC(vec, rate)
for i in range(1, rate):
xyz = vectorAdd(p1, vectorMultiplyC(step, i))
ans.append(xyz)
ans.append(p2)
return ans
def planeEquation(normal, p):
d = -vectorDot(normal, p)
equation = normal + (d,)
return equation
def vectorPlaneHit(vec, plane):
normal = (plane[0], plane[1], plane[2])
nv = vectorDot(normal, vec)
d = plane[3]
if nv == 0:
return None
t = -d / nv
if t < 0:
return None
point = vectorMultiplyC(vec, t)
return point
def normal2color(normal):
vec = vectorMultiplyC(normal, -0.5)
color = vectorAdd(vec, (0.5,0.5,0.5))
return color
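# Minimal usage sketch (hypothetical points): build a plane through three
# points lying on z = 1, then intersect a ray from the origin with it.
if __name__ == "__main__":
    c, p1, p2 = (0, 0, 1), (1, 0, 1), (0, 1, 1)
    normal = pointsNormal(c, p1, p2)   # (0, 0, 1)
    plane = planeEquation(normal, c)   # (0, 0, 1, -1), i.e. the plane z = 1
    print(vectorPlaneHit((0, 0, 1), plane))  # a point on the z = 1 plane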
|
187322
|
import sys
if sys.version_info < (3,):
integer_types = (int, long)
else:
integer_types = (int,)
if sys.version_info < (3,):
string_types = (str, unicode)
else:
string_types = (str,)
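# Usage sketch: version-agnostic isinstance checks, e.g.
#   isinstance(value, string_types)
#   isinstance(value, integer_types)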
|
187328
|
from glob import glob
import os
import string
from tempfile import mkdtemp
from dcmstack import parse_and_stack, NiftiWrapper
def sanitize_path_comp(path_comp):
result = []
for char in path_comp:
        if char not in string.ascii_letters + string.digits + '-_.':
result.append('_')
else:
result.append(char)
return ''.join(result)
def get_dicom_info(dicom_dir, dest):
"""Return a freesurfer style dicom info generator
"""
fl = sorted(glob(os.path.join(dicom_dir, '*.dcm')))
stack = parse_and_stack(fl, force=True, warn_on_except=True)
info = {}
for key in sorted(stack):
key_fields = key.split('-')
idx = int(key_fields[0])
name = key_fields[1]
stack_object = stack[key]
if not stack_object.error:
size = list(stack_object.get_shape())
if len(size) == 3:
size.append(1)
err_status = 'ok'
out_fn = sanitize_path_comp(key) + '.nii.gz'
out_path = os.path.join(dest, out_fn)
nii = stack_object.to_nifti(embed_meta=True)
nii_wrp = NiftiWrapper(nii)
meta_fn = out_fn + '.json'
meta_path = os.path.join(dest, meta_fn)
with open(meta_path, 'w') as fp:
fp.write(nii_wrp.meta_ext.to_json())
nii.to_filename(out_path)
else:
size = [0, 0, 0, 0]
err_status = 'err'
out_fn = None
meta_fn = None
filepath = out_fn
filename = stack_object._files_info[0][2]
info[idx] = dict(idx=idx, name=name, err_status=err_status,
size=size, filename=filename,
filepath=filepath,
metapath=meta_fn)
size = [str(val) for val in size]
        print('\t'.join([str(idx), name, err_status] + size + [filename]))
return info
def unzip_and_extract(filename, dest):
outdir = mkdtemp()
if '.tgz' in filename or '.tar.gz' in filename:
import tarfile
bundle = tarfile.open(filename, 'r')
elif '.zip' in filename:
import zipfile
bundle = zipfile.ZipFile(filename, 'r')
else:
raise ValueError('Unknown compression format. Only zip and tar+gzip supported')
bundle.extractall(outdir)
dcmdir = None
    print(outdir)
    for r,d,f in os.walk(outdir):
        print(r)
        for files in f:
            print(files)
            if files.endswith(".dcm"):
                dcmdir = r
                break
    print(dcmdir)
info = get_dicom_info(dcmdir, dest)
return info
|
187367
|
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from matplotlib import cm
s=20
X, y = load_iris(return_X_y=True)
X = X[:, [2, 3]]
f, ax = plt.subplots(figsize=(4, 2.2))
ax.set_xlim(0, 7)
ax.set_ylim(0, 2.7)
x_ = ax.set_xlabel('Petal length')
y_ = ax.set_ylabel('Petal width')
plt.savefig('images/iris_1.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter([X[0, 0]], [X[0, 1]], c='k', s=s)
plt.savefig('images/iris_2.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter([X[51, 0]], [X[51, 1]], c='k', s=s)
plt.savefig('images/iris_3.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter(X[:, 0], X[:, 1], c='k', s=s)
plt.savefig('images/iris_4.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
for i, name in enumerate(['Setosa', 'Versicolor', 'Virginica']):
loc = np.where(y == i)[0]
plt.scatter(X[loc, 0], X[loc, 1], s=s, label=name)
plt.legend()
plt.savefig('images/iris_5.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
rf = RandomForestClassifier().fit(X, y)
xc = [1, .5]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_6.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='blue', marker='x', s=4*s)
plt.savefig('images/iris_7.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
xc = [4, 1.2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_8.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='orange', marker='x', s=4*s)
plt.savefig('images/iris_9.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
xc = [5, 2.2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_10.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='green', marker='x', s=4*s)
plt.savefig('images/iris_11.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
xc = [2.5, .8]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_12.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
xc = [4.9, 1.6]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_13.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
xc = [6, .2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_14.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
rf = RandomForestClassifier().fit(X, y)
xx, yy = np.meshgrid(np.linspace(0, 7),
np.linspace(0, 2.7))
Z = rf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
colors = ['b', 'orange', 'green']
plt.contourf(xx, yy, Z, levels=2, alpha=0.3, colors=colors)
plt.savefig('images/iris_15.png', bbox_extra_artists=[x_, y_],
bbox_inches='tight', dpi=200)
|
187403
|
import math
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple
from ... import embed
from ..evaluation import Evaluator
from .base import BaseCallback
if TYPE_CHECKING:
from docarray import DocumentArray
from ..base import BaseTuner
class EvaluationCallback(BaseCallback):
"""
A callback that uses the Evaluator to calculate IR metrics at the end of each epoch.
    When used with other callbacks that rely on metrics, such as checkpointing and
    logging callbacks, this callback should be defined first so that it runs before them.
"""
def __init__(
self,
query_data: 'DocumentArray',
index_data: Optional['DocumentArray'] = None,
metrics: Optional[
Dict[str, Tuple[Callable[..., float], Dict[str, Any]]]
] = None,
exclude_self: bool = True,
limit: int = 20,
distance: str = 'cosine',
num_workers: int = 1,
):
"""
:param query_data: Search data used by the evaluator at the end of each epoch,
to evaluate the model.
:param index_data: Index data or catalog used by the evaluator at the end of
each epoch, to evaluate the model.
:param metrics: A dictionary that specifies the metrics to calculate. It maps
metric names to tuples of metric functions and their keyword arguments. If
set to None, default metrics are computed.
:param exclude_self: Whether to exclude self when matching.
:param limit: The number of top search results to consider when computing the
evaluation metrics.
:param distance: The type of distance metric to use when matching query and
index docs, available options are ``'cosine'``, ``'euclidean'`` and
``'sqeuclidean'``.
:param num_workers: The number of workers to use when matching query and
index data.
"""
self._query_data = query_data
self._index_data = index_data
self._metrics = metrics
self._exclude_self = exclude_self
self._limit = limit
self._distance = distance
self._num_workers = num_workers
self._query_pbar_id = None
self._index_pbar_id = None
self._match_pbar_id = None
def on_fit_begin(self, tuner: 'BaseTuner'):
self._query_pbar_id = tuner._progress_bar.add_task(
'Embedding queries', visible=False, start=False
)
self._index_pbar_id = tuner._progress_bar.add_task(
'Embedding index', visible=False, start=False
)
self._match_pbar_id = tuner._progress_bar.add_task(
'Matching', visible=False, start=False
)
def on_epoch_end(self, tuner: 'BaseTuner'):
# start query data progress bar
tuner._progress_bar.reset(
self._query_pbar_id,
visible=True,
description='Embedding queries',
total=math.ceil(len(self._query_data) / tuner._batch_size),
completed=0,
metrics='',
)
# embed queries
for batch in self._query_data.batch(tuner._batch_size):
embed(
batch,
tuner._embed_model,
device=tuner._device_name,
batch_size=tuner._batch_size,
preprocess_fn=tuner._preprocess_fn,
collate_fn=tuner._collate_fn,
)
tuner._progress_bar.update(task_id=self._query_pbar_id, advance=1)
tuner._progress_bar.update(task_id=self._query_pbar_id, visible=False)
if self._index_data:
# start index data progress bar
tuner._progress_bar.reset(
self._index_pbar_id,
visible=True,
description='Embedding index',
total=math.ceil(len(self._index_data) / tuner._batch_size),
completed=0,
metrics='',
)
# embed index
for batch in self._index_data.batch(tuner._batch_size):
embed(
batch,
tuner._embed_model,
device=tuner._device_name,
batch_size=tuner._batch_size,
preprocess_fn=tuner._preprocess_fn,
collate_fn=tuner._collate_fn,
)
tuner._progress_bar.update(task_id=self._index_pbar_id, advance=1)
index_data = self._index_data
tuner._progress_bar.update(task_id=self._index_pbar_id, visible=False)
else:
index_data = self._query_data
# start matching progress bar
tuner._progress_bar.reset(
self._match_pbar_id,
visible=True,
description='Matching',
metrics='',
)
# compute metrics
evaluator = Evaluator(self._query_data, index_data, metrics=self._metrics)
tuner.state.eval_metrics = evaluator.evaluate(
exclude_self=self._exclude_self,
limit=self._limit,
distance=self._distance,
num_workers=self._num_workers,
)
tuner._progress_bar.update(task_id=self._match_pbar_id, visible=False)
|
187419
|
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv("boston_house_prices.csv")
print("Number of samples: %d number of features: %d"
% (data.shape[0], data.shape[1]))
print("Columns:")
print(data.columns)
X = data.drop("MEDV", axis=1)
y = data.MEDV
X_train, X_test, y_train, y_test = train_test_split(X, y)
print(X_train.shape)
# plotting average room number RM vs MEDV
data.plot("RM", "MEDV", kind="scatter")
|
187434
|
import os
from ..praat import PraatAnalysisFunction
from pyraat.parse_outputs import parse_track_script_output
def track_pulse_parse_output(text):
track_text = []
pulse_text = []
blank_count = 0
add_track = True
for line in text.splitlines():
line = line.strip()
if line:
blank_count = 0
else:
blank_count += 1
if blank_count > 1:
add_track = False
continue
if add_track:
track_text.append(line)
else:
pulse_text.append(line)
track = parse_track_script_output('\n'.join(track_text))
pulses = []
for line in pulse_text:
line = line.strip()
if not line:
continue
pulses.append(float(line))
return track, pulses
class PraatPitchTrackFunction(PraatAnalysisFunction):
def __init__(self, praat_path=None, time_step=0.01, min_pitch=75, max_pitch=600, silence_threshold=0.03,
voicing_threshold=0.45, octave_cost=0.01, octave_jump_cost=0.35, voiced_unvoiced_cost=0.14,
with_pulses=False):
script_dir = os.path.dirname(os.path.abspath(__file__))
if with_pulses:
script = os.path.join(script_dir, 'pitch_track_with_pulses.praat')
else:
script = os.path.join(script_dir, 'pitch_track.praat')
arguments = [time_step, min_pitch, max_pitch, silence_threshold, voicing_threshold, octave_cost,
octave_jump_cost, voiced_unvoiced_cost]
super(PraatPitchTrackFunction, self).__init__(script, praat_path, arguments)
if with_pulses:
self._function._output_parse_function = track_pulse_parse_output
class PraatSegmentPitchTrackFunction(PraatAnalysisFunction):
def __init__(self, praat_path=None, time_step=0.01, min_pitch=75, max_pitch=600, silence_threshold=0.03,
voicing_threshold=0.45, octave_cost=0.01, octave_jump_cost=0.35, voiced_unvoiced_cost=0.14,
with_pulses=False):
script_dir = os.path.dirname(os.path.abspath(__file__))
if with_pulses:
script = os.path.join(script_dir, 'pitch_track_with_pulses_segment.praat')
else:
script = os.path.join(script_dir, 'pitch_track_segment.praat')
arguments = [time_step, min_pitch, max_pitch, silence_threshold, voicing_threshold, octave_cost,
octave_jump_cost, voiced_unvoiced_cost]
super(PraatSegmentPitchTrackFunction, self).__init__(script, praat_path, arguments)
if with_pulses:
self._function._output_parse_function = track_pulse_parse_output
|
187467
|
import math
import numpy as np
from scipy.spatial.transform import Rotation
"""
The rotations can be of two types:
1. In a global frame of reference (also known as rotation w.r.t. the fixed or extrinsic frame).
2. In a body-centred frame of reference (also known as rotation with respect to the current frame
of reference, also referred to as rotation w.r.t. the intrinsic frame).
For more details on intrinsic and extrinsic frames refer to: https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations
For Euler angles as ROLL-PITCH-YAW refer to the following links:
* [Tait–Bryan angles](https://en.wikipedia.org/wiki/Euler_angles#Tait–Bryan_angles)
* [Euler angles as YAW-PITCH-ROLL](https://en.wikipedia.org/wiki/Euler_angles#Conventions_2)
* [Rotation using Euler Angles](https://adipandas.github.io/posts/2020/02/euler-rotation/)
* [scipy: ``from_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_euler.html#scipy.spatial.transform.Rotation.from_euler)
* [scipy: ``as_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_euler.html#scipy.spatial.transform.Rotation.as_euler)
To get the angles as yaw-pitch-roll we calculate the rotation in the intrinsic frame of reference:
1. In the intrinsic frame we start with `yaw` to go from inertial frame `0` to frame `1`.
2. Then do `pitch` in frame `1` to go from frame `1` to frame `2`.
3. Then do `roll` in frame `2` to go from frame `2` to body frame `3`.
"""
INTRINSIC_ROTATION = "ZYX"
EXTRINSIC_ROTATION = "xyz"
def add_gaussian_noise(vector, noise_mag):
"""
Add gaussian noise to the input vector.
:param vector: vector of n-dimensions
:type vector: numpy.ndarray
:param noise_mag: magnitude of gaussian noise to add to input vector
:type noise_mag: float
:return: vector of same dimensions as input vector
:rtype: numpy.ndarray
"""
vector = vector + np.random.randn(*vector.shape) * float(noise_mag)
return vector
def euler2quat_raw(rpy):
"""
Euler angles of roll, pitch, yaw in radians. Returns quaternion in scalar first format.
:param rpy: vector of (roll, pitch, yaw) with shape (3,)
:type rpy: numpy.ndarray
:return: quaternion as (w, x, y, z) with shape (4,)
:rtype: numpy.ndarray
"""
roll, pitch, yaw = rpy
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return np.array([w, x, y, z])
def quat2euler_raw(quat):
"""
Convert quaternion orientation to euler angles.
:param quat: quaternion as (w, x, y, z) with shape (4,)
:type quat: numpy.ndarray
:return: vector of (roll, pitch, yaw) with shape (3,)
:rtype: numpy.ndarray
"""
w, x, y, z = quat
sinr_cosp = 2.0 * (w * x + y * z)
cosr_cosp = 1.0 - 2.0 * (x * x + y * y)
roll = np.arctan2(sinr_cosp, cosr_cosp)
sinp = 2.0 * (w * y - z * x)
if abs(sinp) >= 1.0:
pitch = np.copysign(math.pi*0.5, sinp) # use 90 degrees if out of range
else:
pitch = np.arcsin(sinp)
siny_cosp = 2. * (w * z + x * y)
cosy_cosp = 1. - 2. * (y * y + z * z)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return np.array([roll, pitch, yaw])
def quat2euler(quat, noise_mag=0):
"""
Convert quaternion to euler.
:param quat: quaternion in scalar first format
:type quat: numpy.ndarray
:param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians
:type noise_mag: float
:return: numpy array of euler angles as roll, pitch, yaw (x, y, z) in radians
:rtype: numpy.ndarray
"""
quat = np.roll(quat, -1) # convert to scalar last
rot = Rotation.from_quat(quat) # rotation object
euler_angles = rot.as_euler(INTRINSIC_ROTATION)
if noise_mag:
euler_angles = add_gaussian_noise(euler_angles, noise_mag)
rpy = euler_angles[::-1]
return rpy
def euler2quat(euler, noise_mag=0):
"""
Euler angles are transformed to corresponding quaternion.
:param euler: vector of euler angles with shape (3,) in the order of roll-pitch-yaw (XYZ) in radians
:type euler: numpy.ndarray
:param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians
:type noise_mag: float
:return: quaternion vector in scalar first format with shape (4,)
:rtype: numpy.ndarray
"""
euler = np.array([euler[2], euler[1], euler[0]]) # convert to YAW-PITCH-ROLL
if noise_mag:
euler = add_gaussian_noise(euler, noise_mag)
rot = Rotation.from_euler(INTRINSIC_ROTATION, euler)
quat_scalar_last = rot.as_quat()
quat = np.roll(quat_scalar_last, 1)
return quat
def quat2rot(quat, noise_mag=0):
"""
Method to convert quaternion vector to 3x3 direction cosine matrix.
:param quat: quaternion (in scalar first format)
:type quat: numpy.ndarray
:param noise_mag: (float) magnitude of gaussian noise added to orientation along each axis in radians
:type noise_mag: float
:return: rotation matrix SO(3)
:rtype: numpy.ndarray
"""
quat = np.roll(quat, -1) # quaternion in scalar last format
rot = Rotation.from_quat(quat) # rotation object
euler_angles = rot.as_euler(INTRINSIC_ROTATION) # yaw-pitch-roll
if noise_mag:
euler_angles = add_gaussian_noise(euler_angles, noise_mag)
rot_ = Rotation.from_euler(INTRINSIC_ROTATION, euler_angles) # yaw-pitch-roll
rot_mat = rot_.as_matrix() # direction cosine matrix 3x3
return rot_mat
def rot2quat(rot_mat, noise_mag=0):
"""
Method to convert rotation matrix (SO3) to quaternion
:param rot_mat: direction cosine matrix of 3x3 dimensions
:type rot_mat: numpy.ndarray
:param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians.
:type noise_mag: float
:return quat: quaternion (in scalar first format) with a shape (4,).
:rtype: numpy.ndarray
"""
rot = Rotation.from_matrix(rot_mat)
euler_angles = rot.as_euler(INTRINSIC_ROTATION) # yaw-pitch-roll
if noise_mag:
euler_angles = add_gaussian_noise(euler_angles, noise_mag)
rot_ = Rotation.from_euler(INTRINSIC_ROTATION, euler_angles) # yaw-pitch-roll
quat_scalar_last = rot_.as_quat()
quat = np.roll(quat_scalar_last, 1)
return quat
def euler2rot(euler, noise_mag=0):
"""
Convert euler angles to rotation (direction cosine) matrix
:param euler: vector with shape (3,) including euler angles as (roll, pitch, yaw) in radians
:type euler: numpy.ndarray
:param noise_mag: magnitude of gaussian noise included in euler angle
:type noise_mag: float
:return: rotation matrix of shape (3, 3)
:rtype: numpy.ndarray
"""
euler = np.array([euler[2], euler[1], euler[0]]) # convert roll-pitch-yaw to yaw-pitch-roll
if noise_mag:
euler = add_gaussian_noise(euler, noise_mag)
rot_ = Rotation.from_euler(INTRINSIC_ROTATION, euler)
rot_mat = rot_.as_matrix()
return rot_mat
def rot2euler(rot_mat, noise_mag=0):
"""
Convert rotation matrix (SO3) to euler angles
:param rot_mat: rotation matrix of shape (3, 3)
:type rot_mat: numpy.ndarray
:param noise_mag: magnitude of gaussian noise included in euler angle
:type noise_mag: float
:return: euler angles as (roll, pitch, yaw) with shape (3,)
:rtype: numpy.ndarray
"""
rot_ = Rotation.from_matrix(rot_mat)
euler_angles = rot_.as_euler(INTRINSIC_ROTATION) # yaw-pitch-roll
if noise_mag:
euler_angles = add_gaussian_noise(euler_angles, noise_mag)
rpy = np.array([euler_angles[2], euler_angles[1], euler_angles[0]])
return rpy
def quat2euler_scipy(quat):
quat = np.roll(quat, shift=-1) # scalar last
rpy = Rotation.from_quat(quat).as_euler('xyz')
return rpy
def euler2quat_scipy(rpy):
quat = Rotation.from_euler('xyz', rpy).as_quat()
quat = np.roll(quat, shift=1) # scalar first
return quat
def rotmat_world2body_scipy(rpy):
rotmat = Rotation.from_euler('xyz', rpy).as_matrix()
return rotmat
def rotmat_pqr2euler_rate(rpy):
rotmat = np.array([
[1, np.sin(rpy[0])*np.tan(rpy[1]), np.cos(rpy[0])*np.tan(rpy[1])],
        [0, np.cos(rpy[0]), -np.sin(rpy[0])],
[0, np.sin(rpy[0])/np.cos(rpy[1]), np.cos(rpy[0])/np.cos(rpy[1])]
])
return rotmat
def cross(a, b):
    # cross product via the skew-symmetric matrix of a: a x b = [a]_x b
    a_skew = np.array([
        [0, -a[2], a[1]],
        [a[2], 0, -a[0]],
        [-a[1], a[0], 0]
    ])
    return np.dot(a_skew, b)
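# Minimal usage sketch (hypothetical values): round-trip a roll-pitch-yaw
# triple through a scalar-first quaternion and back using the helpers above.
if __name__ == "__main__":
    rpy_in = np.array([0.1, -0.2, 0.3])  # (roll, pitch, yaw) in radians
    quat = euler2quat(rpy_in)            # quaternion as (w, x, y, z)
    rpy_out = quat2euler(quat)
    print(np.allclose(rpy_in, rpy_out))  # expected: True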
|
187475
|
import pytest
import glob
import os
import sys
import pathlib
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_PROJECT = os.path.abspath(PATH_HERE+"/../")
PATH_DATA = os.path.abspath(PATH_PROJECT+"/data/abfs/")
PATH_HEADERS = os.path.abspath(PATH_PROJECT+"/data/headers/")
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except ImportError:
raise ImportError("couldn't import local pyABF")
@pytest.mark.slow
@pytest.mark.parametrize("abfPath", glob.glob("data/abfs/*.abf"))
def test_saveABF1_forEveryFile(abfPath):
testOutput = pathlib.Path("testOutput")
if not testOutput.exists():
testOutput.mkdir()
abf = pyabf.ABF(abfPath)
# don't attempt to save ABFs with variable-length sweeps
if (abf.nOperationMode == 1):
return
abf.saveABF1(f"testOutput/{abf.abfID}.abf")
|
187542
|
from confidant import settings
from confidant.app import create_app
if __name__ == '__main__':
app = create_app()
app.run(
host=settings.get('HOST', '127.0.0.1'),
port=settings.get('PORT', 5000),
debug=settings.get('DEBUG', True)
)
|
187556
|
import os
from typing import Dict, List, Optional
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, Security, Depends, HTTPException
from fastapi.security.api_key import APIKeyHeader, APIKey
from impl import extractor
from pydantic import BaseModel
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
load_dotenv()
app = FastAPI()
class Values(BaseModel):
values: List = []
class Value(Values):
recordId: str
    data: Optional[Dict[str, str]] = None
API_KEY = os.environ['KEY']
API_KEY_NAME = "Ocp-Apim-Subscription-Key"
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
async def get_api_key(
api_key_header: str = Security(api_key_header),
):
if api_key_header == API_KEY:
return api_key_header
else:
raise HTTPException(
status_code = HTTP_403_FORBIDDEN, detail = "Key not present"
)
@app.get('/api/healthcheck', status_code=200)
async def healthcheck():
return 'Ready'
@app.post('/api/parse')
def parse(values: Values, api_key: APIKey = Depends(get_api_key)):
    body = values.dict()
    if not body:
        raise HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail='Expected text within body of request. No text found.',
        )
    return extractor.parse(body)
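# Example request shape for /api/parse (hypothetical payload; the actual
# contract is defined by impl.extractor.parse):
#   POST /api/parse  with header "Ocp-Apim-Subscription-Key: <KEY>"
#   body: {"values": [{"recordId": "1", "data": {"text": "..."}}]}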
# Remove these two lines below for non-debug/production mode
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000)
|
187561
|
import math
import torch
import torch.nn as nn
from pytracking import TensorList
from ltr.models.layers import activation
class GNSteepestDescent(nn.Module):
"""General module for steepest descent based meta learning."""
def __init__(self, residual_module, num_iter=1, compute_losses=False, detach_length=float('Inf'),
parameter_batch_dim=0, residual_batch_dim=0, steplength_reg=0.0,
filter_dilation_factors=None):
super().__init__()
self.residual_module = residual_module
self.num_iter = num_iter
self.compute_losses = compute_losses
self.detach_length = detach_length
self.steplength_reg = steplength_reg
self._parameter_batch_dim = parameter_batch_dim
self._residual_batch_dim = residual_batch_dim
self.filter_dilation_factors = filter_dilation_factors
def _sqr_norm(self, x: TensorList, batch_dim=0):
sum_keep_batch_dim = lambda e: e.sum(dim=[d for d in range(e.dim()) if d != batch_dim])
return sum((x * x).apply(sum_keep_batch_dim))
def _compute_loss(self, res):
return sum((res * res).sum()) / sum(res.numel())
def forward(self, meta_parameter: TensorList, num_iter=None, *args, **kwargs):
# Make sure grad is enabled
torch_grad_enabled = torch.is_grad_enabled()
torch.set_grad_enabled(True)
num_iter = self.num_iter if num_iter is None else num_iter
meta_parameter_iterates = [meta_parameter]
losses = []
for i in range(num_iter):
if i > 0 and i % self.detach_length == 0:
meta_parameter = meta_parameter.detach()
meta_parameter.requires_grad_(True)
# Compute residual vector
r = self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs)
if self.compute_losses:
losses.append(self._compute_loss(r))
# Compute gradient of loss
u = r.clone()
g = TensorList(torch.autograd.grad(r, meta_parameter, u, create_graph=True))
# Multiply gradient with Jacobian
h = TensorList(torch.autograd.grad(g, u, g, create_graph=True))
# Compute squared norms
ip_gg = self._sqr_norm(g, batch_dim=self._parameter_batch_dim)
ip_hh = self._sqr_norm(h, batch_dim=self._residual_batch_dim)
# Compute step length
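            # (exact line search on the Gauss-Newton objective:
            # alpha = ||g||^2 / (||J g||^2 + reg * ||g||^2), where h = J g is
            # the Jacobian-vector product computed above)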
alpha = ip_gg / (ip_hh + self.steplength_reg * ip_gg).clamp(1e-8)
# Compute optimization step
step = g.apply(lambda e: alpha.reshape([-1 if d==self._parameter_batch_dim else 1 for d in range(e.dim())]) * e)
# Add step to parameter
meta_parameter = meta_parameter - step
meta_parameter_iterates.append(meta_parameter)
if self.compute_losses:
losses.append(self._compute_loss(self.residual_module(meta_parameter,
filter_dilation_factors=self.filter_dilation_factors,
**kwargs)))
# Reset the grad enabled flag
torch.set_grad_enabled(torch_grad_enabled)
if not torch_grad_enabled:
meta_parameter.detach_()
for w in meta_parameter_iterates:
w.detach_()
for l in losses:
l.detach_()
return meta_parameter, meta_parameter_iterates, losses
|
187620
|
import argparse
from copy import copy
from functools import partial
from pathlib import Path
from typing import Any
from typing import Dict
from chainer import cuda
from chainer import optimizers
from chainer import training
from chainer.dataset import convert
from chainer.iterators import MultiprocessIterator
from chainer.training import extensions
from utility.chainer_utility import TensorBoardReport
from yukarin.config import create_from_json
from yukarin.dataset import create_cg as create_dataset
from yukarin.model import create
from yukarin.updater import CGUpdater
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_json_path', type=Path)
parser.add_argument('output', type=Path)
arguments = parser.parse_args()
config = create_from_json(arguments.config_json_path)
arguments.output.mkdir(exist_ok=True)
config.save_as_json((arguments.output / 'config.json').absolute())
# model
if config.train.gpu >= 0:
cuda.get_device_from_id(config.train.gpu).use()
predictor_xy, discriminator_x = create(config.model)
predictor_yx, discriminator_y = create(config.model)
models = {
'predictor_xy': predictor_xy,
'predictor_yx': predictor_yx,
'discriminator_x': discriminator_x,
'discriminator_y': discriminator_y,
}
if config.train.pretrained_model is not None:
raise ValueError('cannot set pretrained model')
# dataset
dataset = create_dataset(config.dataset)
train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
# optimizer
def create_optimizer(model):
cp: Dict[str, Any] = copy(config.train.optimizer)
n = cp.pop('name').lower()
if n == 'adam':
optimizer = optimizers.Adam(**cp)
elif n == 'sgd':
optimizer = optimizers.SGD(**cp)
else:
raise ValueError(n)
optimizer.setup(model)
return optimizer
opts = {key: create_optimizer(model) for key, model in models.items()}
# updater
converter = partial(convert.concat_examples, padding=0)
updater = CGUpdater(
loss_config=config.loss,
predictor_xy=predictor_xy,
predictor_yx=predictor_yx,
discriminator_x=discriminator_x,
discriminator_y=discriminator_y,
device=config.train.gpu,
iterator=train_iter,
optimizer=opts,
converter=converter,
)
# trainer
trigger_log = (config.train.log_iteration, 'iteration')
trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
trigger_stop = (config.train.stop_iteration, 'iteration') if config.train.stop_iteration is not None else None
trainer = training.Trainer(updater, stop_trigger=trigger_stop, out=arguments.output)
ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
trainer.extend(ext, name='test', trigger=trigger_log)
ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
trainer.extend(ext, name='train', trigger=trigger_log)
trainer.extend(extensions.dump_graph('predictor_xy/loss'))
ext = extensions.snapshot_object(predictor_xy, filename='predictor_{.updater.iteration}.npz')
trainer.extend(ext, trigger=trigger_snapshot)
trainer.extend(extensions.LogReport(trigger=trigger_log))
trainer.extend(TensorBoardReport(), trigger=trigger_log)
if trigger_stop is not None:
trainer.extend(extensions.ProgressBar(trigger_stop))
trainer.run()
|
187641
|
import codecs
import os
from random import randint, shuffle, sample, random
from tqdm import tqdm
from dialogentail.reader.swag_reader import SwagReader
from dialogentail.util import stopwatch, nlp
dull_responses = [
"I don't know.",
"I don't know what you're talking about.",
"I don't know what you mean.",
"I don't know what you mean by that.",
"I don't know what you mean by that answer.",
"I'm not sure.",
"I'm not sure what you're saying.",
"I'm not sure what you're talking about.",
"I'm not sure what you're trying to say.",
"I'm not sure what you mean by that.",
"I'm not sure what you mean by this.",
"I'm not sure what you mean.",
"I don't understand what you mean.",
]
@stopwatch.profile
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('swagfile', type=str, help='swag csv file')
args = parser.parse_args()
basedir, filename = os.path.split(args.swagfile)
output = os.path.join(basedir, f"dial_{filename}.jsonl")
spacy = nlp.Spacy()
with codecs.getwriter("utf-8")(open(output, "wb")) as out_file:
line, rand_line, dull_line = 0, 0, 0
        for id, sentence1, distractors, sentence2 in tqdm(SwagReader(args.swagfile)):
if sentence2:
out_file.write('{{"pairID": "{}0", "gold_label": "coherent", '
'"sentence1": "{}", "sentence2": "{}"}}\n'.format(id, sentence1, sentence2))
for p, poor_sent in enumerate(distractors):
out_file.write('{{"pairID": "{id}{seq}", "gold_label": "{gold}", '
'"sentence1": "{s1}", "sentence2": "{s2}"}}\n'.format(id=id, seq=p + 1,
gold="" if sentence2 is None else "poor",
s1=sentence1, s2=poor_sent))
if sentence2:
bad_size = randint(0, len(distractors) // 2)
rand_line += bad_size
if bad_size > 0:
tokens = set()
for p in distractors:
tokens.update(spacy.word_tokenize(p))
for i in range(bad_size):
utter_size = randint(5, len(tokens) // 2)
shuffled_list = list(tokens)
shuffle(shuffled_list)
out_file.write('{{"pairID": "{}1{}", "gold_label": "bad", '
'"sentence1": "{}", "sentence2": "{}"}}\n'
.format(id, i + 1, sentence1, ' '.join(shuffled_list[:utter_size])))
dull_size = 0 if random() <= 0.5 else 1
dull_line += dull_size
if dull_size > 0:
dull_utterances = sample(dull_responses, dull_size)
for i, dull_utter in enumerate(dull_utterances):
out_file.write('{{"pairID": "{}3{}", "gold_label": "poor", '
'"sentence1": "{}", "sentence2": "{}"}}\n'
.format(id, i + 1, sentence1, dull_utter))
print(f"{line} lines + {rand_line} random lines + {dull_line} dull lines exported")
if __name__ == "__main__":
main()
|
187649
|
import getpass
import json
import pathlib
import random
import time
from typing import List
import torch
import numpy as np
from src.utils.log import create_base_logger, create_logdir
from src.utils import measure_runtime, get_git_version
from .experiment import Experiment
class ExperimentSet:
def __init__(self, **kwargs):
self.config = kwargs
self.name = self.config["name"]
self.experiment_config = self.config["experiment"]
self.seeds: List[int] = self.config["seeds"]
self.remote: bool = self.config.get("remote", False)
self.logdir = create_logdir(f"learning_{self.name}")
self.create_set_info()
self.datadir = pathlib.Path("data")
self.datadir.mkdir(parents=True, exist_ok=True)
self.logger = create_base_logger(self.logdir, name="experiment_set")
self.device = torch.device("cuda" if (self.config.get("cuda", True) and torch.cuda.is_available()) else "cpu")
self.logger.info(f"Using device {self.device}")
def run(self):
with measure_runtime(self.logdir):
for seed in self.seeds:
torch.cuda.empty_cache()
self.logger.info(f"Seed {seed} used to run experiment")
self.set_random_seeds(seed)
exp_logdir: pathlib.Path = self.create_experiment_logdir(seed)
experiment: Experiment = Experiment(seed=seed, logdir=exp_logdir, datadir=self.datadir,
set_name=self.name, device=self.device,
remote=self.remote, **self.experiment_config)
experiment.run()
experiment.plot()
@staticmethod
def set_random_seeds(seed: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def create_experiment_logdir(self, seed: int) -> pathlib.Path:
"""
creates a logdir for an Experiment. These logdirs are contained within the ExperimentSet logdir.
this function also fills the created dir with some basic information about the experiment such as the
config used and the random seed that was set.
:param seed:
:return: the created logdir that should be passed to the Experiment
"""
exp_logdir: pathlib.Path = self.logdir / f"seed_{seed}"
# Check that this seed has not been used before
assert not exp_logdir.exists()
exp_logdir.mkdir(exist_ok=False, parents=False)
with open(str(exp_logdir / "config.json"), "w") as fp:
json.dump(self.experiment_config, fp, indent=4)
with open(str(exp_logdir / "seed.json"), "w") as fp:
json.dump({"seed": seed}, fp, indent=4)
with open(str(exp_logdir / "git_version.json"), "w") as fp:
json.dump({"version": get_git_version()}, fp, indent=4)
return exp_logdir
def create_set_info(self):
with open(str(self.logdir / "config.json"), "w") as fp:
json.dump(self.config, fp, indent=4)
        with open(self.logdir / "set.json", "w") as fp:
json.dump({"name": self.name,
"user": getpass.getuser(),
"experiment_config": self.experiment_config,
"seeds": self.seeds,
"start_time": time.time()
}, fp)
|
187689
|
from typing import List
class TwoSum:
def __call__(self, xs: List[int], lookup: int) -> List[int]:
"""
Coderbyte: Two sum problem
Origin: https://coderbyte.com/algorithm/two-sum-problems
Classic Two Sum problem.
:param xs: list to perform lookup
:param lookup: an integer value to sum with
:return: list of pairs discovered
"""
cache = {}
sums = []
for x in xs:
desired = lookup - x
            # membership test avoids the falsy-value bug of cache.get(desired, False)
            # when the complement is 0
            if desired in cache:
sums.append([x, desired])
cache[x] = x
return sums
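# Minimal usage sketch: collect every pair in xs that sums to the target.
if __name__ == "__main__":
    two_sum = TwoSum()
    print(two_sum([1, 2, 3, 4], 5))  # [[3, 2], [4, 1]]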
|
187700
|
import random
from typing import Dict
from spaceopt import SpaceOpt
random.seed(123456)
def search_space() -> Dict[str, list]:
return {
"a": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
"b": [-5.5, -4.4, -3.3, -2.2, -1.1, 0.0, 1.1, 2.2, 3.3, 4.4, 5.5],
"c": [128, 256, 512, 1024],
"d": ["ABC", "IJK", "XYZ"],
"e": [True, False],
"f": [10000],
}
def test_SpaceOpt__init__() -> None:
SpaceOpt(search_space=search_space(), target_name="y", objective="min")
SpaceOpt(search_space=search_space(), target_name="y", objective="max")
SpaceOpt(search_space=search_space(), target_name="score", objective="maximize")
SpaceOpt(search_space=search_space(), target_name="max", objective="minimize")
try:
SpaceOpt(search_space=search_space(), target_name=1, objective="min")
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"target_name is of type <class 'int'>,"
" but it should be of type <class 'str'>."
)
try:
SpaceOpt(search_space=search_space(), target_name=None, objective="min")
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"target_name is of type <class 'NoneType'>,"
" but it should be of type <class 'str'>."
)
try:
SpaceOpt(search_space=search_space(), target_name="", objective="min")
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "target_name is empty."
try:
SpaceOpt(search_space=search_space(), target_name="y", objective="maxi")
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == (
"objective should be one of: ('maximize', 'minimize', 'max', 'min')."
)
try:
SpaceOpt(search_space=search_space(), target_name="b", objective="min")
except Exception as e:
assert isinstance(e, RuntimeError)
assert str(e) == (
"target_name='b' should not be in search space variables:"
" ['a', 'b', 'c', 'd', 'e', 'f']."
)
def test_SpaceOpt_append_evaluated_spoint() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
try:
spaceopt.append_evaluated_spoint(["a", "b", "c", "d", "e"])
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"evaluated_spoint is of type <class 'list'>,"
" but it should be of type <class 'dict'>."
)
try:
spaceopt.append_evaluated_spoint(
{"a": 16, "b": 3.3, "c": 512, "d": "ABC", "e": False, "f": 10000}
)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == (
"spoint={'a': 16, 'b': 3.3, 'c': 512, 'd': 'ABC', 'e': False, 'f': 10000}"
" is not evaluated, target_name='y' is not found."
)
try:
spaceopt.append_evaluated_spoint(
{"a": 16, "b": 3.3, "c": 512, "d": "ABC", "e": False, "f": 10000, "y": 1}
)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"evaluated_spoint has 'y' with value=1 of type <class 'int'>,"
" but it should be of type <class 'float'>."
)
spaceopt.append_evaluated_spoint(
{"a": 16, "b": 3.3, "c": 512, "d": "ABC", "e": False, "f": 10000, "y": 1.0}
)
def test_SpaceOpt_get_random() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
try:
spaceopt.get_random(num_spoints=1.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"num_spoints is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt.get_random(num_spoints=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "num_spoints should be greater than 0."
try:
spaceopt.get_random(sample_size=100.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"sample_size is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt.get_random(sample_size=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "sample_size should be greater than 0."
spoints = spaceopt.get_random(num_spoints=5, sample_size=1000)
assert isinstance(spoints, list)
assert len(spoints) == 5
spoint = spaceopt.get_random(num_spoints=1, sample_size=10)
assert isinstance(spoint, dict)
assert set(spoint.keys()) == set(spoints[-1].keys())
def test_SpaceOpt_fit_predict() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
try:
spaceopt.fit_predict(num_spoints=2.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"num_spoints is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt.fit_predict(num_spoints=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "num_spoints should be greater than 0."
try:
spaceopt.fit_predict(num_boost_round=2048.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"num_boost_round is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt.fit_predict(num_boost_round=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "num_boost_round should be greater than 0."
try:
spaceopt.fit_predict(sample_size=100.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"sample_size is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt.fit_predict(sample_size=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "sample_size should be greater than 0."
for i in range(10):
spoint = spaceopt.fit_predict(
num_spoints=1, num_boost_round=100, sample_size=100
)
assert isinstance(spoint, dict)
spoint["y"] = random.uniform(-1, 1)
spaceopt.append_evaluated_spoint(spoint)
def test_SpaceOpt__sample_random_spoints() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
try:
spaceopt._sample_random_spoints(sample_size=100.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"sample_size is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt._sample_random_spoints(sample_size=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "sample_size should be greater than 0."
spoints = spaceopt._sample_random_spoints(sample_size=10)
assert isinstance(spoints, list)
assert len(spoints) == 10
spoints = spaceopt._sample_random_spoints(sample_size=1)
assert isinstance(spoints, list)
assert len(spoints) == 1
def test_SpaceOpt__sample_unevaluated_unique_spoints() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
try:
spaceopt._sample_unevaluated_unique_spoints(sample_size=100.0)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"sample_size is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt._sample_unevaluated_unique_spoints(sample_size=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "sample_size should be greater than 0."
try:
spaceopt._sample_unevaluated_unique_spoints(
sample_size=100, max_num_retries=10.0
)
except Exception as e:
assert isinstance(e, TypeError)
assert str(e) == (
"max_num_retries is of type <class 'float'>,"
" but it should be of type <class 'int'>."
)
try:
spaceopt._sample_unevaluated_unique_spoints(sample_size=1, max_num_retries=0)
except Exception as e:
assert isinstance(e, ValueError)
assert str(e) == "max_num_retries should be greater than 0."
spaceopt = SpaceOpt(
search_space={"w": ["W1", "W2"], "b": [5]}, target_name="y", objective="min"
)
spaceopt.append_evaluated_spoint({"w": "W1", "b": 5, "y": 0.1})
spoints = spaceopt._sample_unevaluated_unique_spoints(
sample_size=100, max_num_retries=100
)
assert len(spoints) == 1
assert spoints[0]["w"] == "W2"
spaceopt.append_evaluated_spoint({"w": "W2", "b": 5, "y": 0.2})
try:
spaceopt._sample_unevaluated_unique_spoints(
sample_size=100, max_num_retries=100
)
except Exception as e:
assert isinstance(e, RuntimeError)
assert str(e) == (
"could not sample any new spoints -"
" search_space is fully explored or random sampling was unfortunate."
"\nsearch_space.size = 2"
"\nnum evaluated spoints = 2"
"\nnum unevaluated spoints = 0"
)
def test_SpaceOpt__str__() -> None:
spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
assert str(spaceopt) == (
"SpaceOpt(\n"
" Space(\n"
" Variable(\n"
" name='a',\n"
" values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],\n"
" vtype=<class 'int'>,\n"
" is_categorical=False\n"
" ),\n"
" Variable(\n"
" name='b',\n"
" values=[-5.5, -4.4, -3.3, -2.2, -1.1, 0.0, 1.1, 2.2, 3.3, 4.4, 5.5],\n"
" vtype=<class 'float'>,\n"
" is_categorical=False\n"
" ),\n"
" Variable(\n"
" name='c',\n"
" values=[128, 256, 512, 1024],\n"
" vtype=<class 'int'>,\n"
" is_categorical=False\n"
" ),\n"
" Variable(\n"
" name='d',\n"
" values=['ABC', 'IJK', 'XYZ'],\n"
" vtype=<class 'str'>,\n"
" is_categorical=True\n"
" ),\n"
" Variable(\n"
" name='e',\n"
" values=[True, False],\n"
" vtype=<class 'bool'>,\n"
" is_categorical=False\n"
" ),\n"
" Variable(\n"
" name='f',\n"
" values=[10000],\n"
" vtype=<class 'int'>,\n"
" is_categorical=False\n"
" ),\n"
" size=5544\n"
" ),\n"
" target_name='y',\n"
" objective=min\n"
")"
)
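# --- Illustrative usage (not part of the test suite) ---
# A minimal optimization loop sketched from the API exercised above; the
# objective below is hypothetical, while SpaceOpt, fit_predict and
# append_evaluated_spoint are exactly the calls the tests cover.
def example_spaceopt_loop() -> dict:
    spaceopt = SpaceOpt(search_space=search_space(), target_name="y", objective="min")
    spoint: dict = {}
    for _ in range(5):
        spoint = spaceopt.fit_predict(num_spoints=1, num_boost_round=100, sample_size=100)
        spoint["y"] = float(spoint["a"]) * abs(spoint["b"])  # hypothetical objective
        spaceopt.append_evaluated_spoint(spoint)
    return spoint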
|
187749
|
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
class AuthBackendTests(TestCase):
def setUp(self):
self.existing_user = User.objects.create_user(username='test',
email='<EMAIL>', password='password')
def tearDown(self):
self.existing_user.delete()
def test_without_email_auth_backend(self):
user = authenticate(username='test2', password='<PASSWORD>')
self.assertEqual(user, None)
user = authenticate(username='test', password='password')
self.assertEqual(user, self.existing_user)
user = authenticate(username='<EMAIL>', password='password')
self.assertEqual(user, None)
@override_settings(AUTHENTICATION_BACKENDS=[
'userprofiles.auth_backends.EmailOrUsernameModelBackend'])
def test_with_email_auth_backend(self):
user = authenticate(username='test2', password='<PASSWORD>')
self.assertEqual(user, None)
user = authenticate(username='test', password='password')
self.assertEqual(user, self.existing_user)
user = authenticate(username='<EMAIL>', password='password')
self.assertEqual(user, self.existing_user)
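# --- Illustrative sketch (not the project's actual backend) ---
# A minimal version of what userprofiles.auth_backends.EmailOrUsernameModelBackend
# could look like, assuming it simply retries the user lookup by email; included
# here only to document the behaviour the tests above exercise.
from django.contrib.auth.backends import ModelBackend
class EmailOrUsernameBackendSketch(ModelBackend):
    def authenticate(self, request=None, username=None, password=None, **kwargs):
        try:
            # Treat the credential as a username first, then as an email address.
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            try:
                user = User.objects.get(email=username)
            except User.DoesNotExist:
                return None
        return user if user.check_password(password) else None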
|
187767
|
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
from .utils import *
from .model import Model
from .writer import Writer
from .data_loader import get_loader
from .radam import RAdam
|
187797
|
import gym
import numpy as np
from gym import spaces
class NormalizedActionWrapper(gym.ActionWrapper):
"""Environment wrapper to normalize the action space to [-scale, scale]
Args:
env (gym.env): OpenAI Gym environment to wrap around
scale (float): Scale for normalizing action. Default: 1.0.
References:
https://github.com/tristandeleu/pytorch-maml-rl
"""
def __init__(self, env, scale=1.0):
super(NormalizedActionWrapper, self).__init__(env)
self.scale = scale
self.action_space = spaces.Box(low=-scale, high=scale, shape=self.env.action_space.shape)
def action(self, action):
# Clip the action in [-scale, scale]
action = np.clip(action, -self.scale, self.scale)
# Map normalized action to original action space
lb, ub = self.env.action_space.low, self.env.action_space.high
if np.all(np.isfinite(lb)) and np.all(np.isfinite(ub)):
action = lb + (action + self.scale) * (ub - lb) / (2 * self.scale)
action = np.clip(action, lb, ub)
else:
raise ValueError("Invalid value in action space")
return action
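# --- Usage sketch ---
# Wrapping a continuous-control task so a policy can act in [-scale, scale];
# 'Pendulum-v0' is only an illustrative environment id, not one this module requires.
if __name__ == '__main__':
    env = NormalizedActionWrapper(gym.make('Pendulum-v0'), scale=1.0)
    obs = env.reset()
    # A random normalized action gets clipped and rescaled into the env's true bounds.
    obs, reward, done, info = env.step(env.action_space.sample())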
|
187834
|
import unittest
from katas.beta.sort_array_by_last_character import sort_me
class SortMeTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(sort_me(['acvd', 'bcc']), ['bcc', 'acvd'])
def test_equals_2(self):
self.assertEqual(sort_me([
'asdf', 14, '13', 'asdf']), ['13', 14, 'asdf', 'asdf'])
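# --- Illustrative sketch (not the kata's actual solution) ---
# An implementation consistent with the expectations above: sort by the last
# character of the string form of each element, relying on Python's stable
# sort to keep equal keys in their input order.
def sort_me_sketch(arr):
    return sorted(arr, key=lambda item: str(item)[-1])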
|
187839
|
from typing import Sequence, Union, Optional
from jina.executors.evaluators.rank import BaseRankingEvaluator
class ReciprocalRankEvaluator(BaseRankingEvaluator):
"""
Gives score as per reciprocal rank metric.
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def evaluate(self, actual: Sequence[Union[str, int]], desired: Sequence[Union[str, int]], *args, **kwargs) -> float:
"""
Evaluate score as per reciprocal rank metric.
:param actual: Sequence of sorted document IDs.
        :param desired: Sequence of relevant document IDs sorted by relevance
            (the first is the most relevant; only the first is considered).
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
:return: Reciprocal rank score
"""
if len(actual) == 0 or len(desired) == 0:
return 0.0
try:
return 1.0 / (actual.index(desired[0]) + 1)
        except ValueError:
return 0.0
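# --- Worked example ---
# With actual=['d3', 'd1', 'd2'] and desired=['d1'], the most relevant id 'd1'
# sits at index 1 of `actual`, so the score is 1.0 / (1 + 1) = 0.5; if 'd1'
# never appears in `actual`, list.index raises ValueError and 0.0 is returned.
#
#   evaluator.evaluate(actual=['d3', 'd1', 'd2'], desired=['d1'])  # -> 0.5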
|
187852
|
from keepachangelog.version import __version__
from keepachangelog._changelog import to_dict, to_raw_dict, release, from_dict
from keepachangelog._versioning import to_sorted_semantic
|
187856
|
from __future__ import annotations
import enum
class WitnessScope(enum.IntEnum):
# Indicates that no contract was witnessed. Only sign the transaction.
_None = 0
# Indicates that the calling contract must be the entry contract. The witness/permission/signature
# given on first invocation will automatically expire if entering deeper internal
# invokes. This can be the default safe choice for native NEO/GAS (previously used
# on Neo 2 as "attach" mode).
CalledByEntry = 1
    # Custom hash for contract-specific scope.
CustomContracts = 16
# Custom pubkey for group members.
CustomGroups = 32
# This allows the witness in all contexts (default Neo2 behavior).
Global = 128
def neo_name(self) -> str:
if self is WitnessScope._None:
return 'None'
else:
return self.name
    @staticmethod
    def get_from_neo_name(neo_name: str) -> WitnessScope:
        if neo_name == 'None':
            return WitnessScope._None
        elif neo_name != WitnessScope._None.name:
            return WitnessScope[neo_name]
        else:
            # The internal spelling '_None' is not a valid NEO scope name.
            raise ValueError(f"{neo_name} is not a valid NEO witness scope name")
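# --- Illustrative round-trip ---
# neo_name() exposes the scope under NEO's canonical spelling ("None") and
# get_from_neo_name() reverses it; since the values are bit flags, combined
# scopes can be expressed with bitwise OR on the integer values.
#
#   WitnessScope._None.neo_name()                             # -> 'None'
#   WitnessScope.get_from_neo_name('CalledByEntry')           # -> WitnessScope.CalledByEntry
#   WitnessScope.CustomContracts | WitnessScope.CustomGroups  # -> 48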
|
187862
|
import pandas as pd
synonyms_dict = pd.read_csv('mark2cure/analysis/data/synonym_dictionary.txt', sep='\t', names=['dirty', 'clean'], index_col='dirty').to_dict()['clean']
|
187873
|
from Stephanie.configurer import config
# noinspection SpellCheckingInspection
class AudioRecognizer:
def __init__(self, recognizer, UnknownValueError, RequestError):
self.UnknownValueError = UnknownValueError
self.RequestError = RequestError
self.r = recognizer
self.c = config
def recognize_from_sphinx(self, audio):
# recognize speech using Sphinx
try:
text = self.r.recognize_sphinx(audio)
print("Sphinx thinks you said " + text)
return text
except self.UnknownValueError:
print("Sphinx could not understand audio")
return False
except self.RequestError as e:
print("Sphinx error; {0}".format(e))
return False
def recognize_from_google(self, audio):
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
text = self.r.recognize_google(audio)
print("Google Speech Recognition thinks you said " + text)
return text
        except KeyError:
            print("Google Recognition couldn't understand your audio with enough confidence.")
            return False
except self.UnknownValueError:
print("Google Speech Recognition could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return False
def recognize_from_google_cloud(self, audio):
# recognize speech using Google Cloud Speech
try:
google_cloud_speech_credentials = self.c.config['STT_KEYS']['google_cloud_speech_api']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_google_cloud(audio,
credentials_json=google_cloud_speech_credentials)
print("Google Cloud Speech thinks you said " + text)
return text
except self.UnknownValueError:
print("Google Cloud Speech could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Google Cloud Speech service; {0}".format(e))
return False
def recognize_from_wit(self, audio):
# recognize speech using Wit.ai
try:
wit_ai_key = self.c.config['STT_KEYS'][
'wit.ai_speech_api'] # Wit.ai keys are 32-character uppercase alphanumeric strings
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_wit(audio, key=wit_ai_key)
print("Wit.ai thinks you said " + text)
return text
except self.UnknownValueError:
print("Wit.ai could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Wit.ai service; {0}".format(e))
return False
def recognize_from_bing(self, audio):
# recognize speech using Microsoft Bing Voice Recognition
# Microsoft Bing Voice Recognition API keys 32-character lowercase hexadecimal strings
try:
bing_key = self.c.config['STT_KEYS']['bing_speech_api']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_bing(audio, key=bing_key)
print("Microsoft Bing Voice Recognition thinks you said " + text)
return text
except self.UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
return False
def recognize_from_houndify(self, audio):
# recognize speech using Houndify
try:
houndify_client_id = self.c.config['STT_KEYS'][
'houndify_client_id'] # Houndify client IDs are Base64-encoded strings
houndify_client_key = self.c.config['STT_KEYS'][
'houndify_client_key'] # Houndify client keys are Base64-encoded strings
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_houndify(audio, client_id=houndify_client_id,
client_key=houndify_client_key)
print("Houndify thinks you said " + text)
return text
except self.UnknownValueError:
print("Houndify could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Houndify service; {0}".format(e))
return False
def recognize_from_ibm(self, audio):
# recognize speech using IBM Speech to Text
try:
# IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
ibm_username = self.c.config['STT_KEYS']['ibm_username']
# IBM Speech to Text passwords are mixed-case alphanumeric strings
ibm_password = self.c.config['STT_KEYS']['ibm_password']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
            text = self.r.recognize_ibm(audio, username=ibm_username,
                                        password=ibm_password)
print("IBM Speech to Text thinks you said " + text)
return text
except self.UnknownValueError:
print("IBM Speech to Text could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from IBM Speech to Text service; {0}".format(e))
return False
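# --- Usage sketch (assumes the SpeechRecognition package) ---
# The constructor is dependency-injected, so a typical wiring would pass the
# recognizer and exception types from the `speech_recognition` library; that
# pairing is an assumption, not something this module itself imports.
#
#   import speech_recognition as sr
#   r = sr.Recognizer()
#   recognizer = AudioRecognizer(r, sr.UnknownValueError, sr.RequestError)
#   with sr.Microphone() as source:
#       audio = r.listen(source)
#   text = recognizer.recognize_from_sphinx(audio)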
|
187951
|
from bip_utils.monero.mnemonic.monero_mnemonic_ex import MoneroChecksumError
from bip_utils.monero.mnemonic.monero_mnemonic import (
MoneroLanguages, MoneroWordsNum, MoneroMnemonic, MoneroMnemonicDecoder, MoneroMnemonicEncoder
)
from bip_utils.monero.mnemonic.monero_entropy_generator import MoneroEntropyBitLen, MoneroEntropyGenerator
from bip_utils.monero.mnemonic.monero_mnemonic_generator import MoneroMnemonicGenerator
from bip_utils.monero.mnemonic.monero_mnemonic_validator import MoneroMnemonicValidator
from bip_utils.monero.mnemonic.monero_seed_generator import MoneroSeedGenerator
|
187975
|
import pandas as pd
url = "https://finance.naver.com/item/sise_day.nhn?code=066570"
df = pd.read_html(url)
print(df[0])
|
188007
|
import os
import pymongo
# Connect to database
circuit_db_client = pymongo.MongoClient(os.environ['CIRCUIT_DB'])
circuit_db = circuit_db_client.va_circuit_court_cases
district_db_client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
district_db = district_db_client.va_district_court_cases
# Python 3 prints; count_documents() is the supported replacement for the
# Collection.count() that was removed in PyMongo 4.
print('DISTRICT COURT')
for court in district_db.courts.find():
    print(court['name'])
    court['total_count'] = district_db.cases.count_documents({
        'FIPSCode': court['fips_code']
    })
    court['collected_count'] = district_db.cases.count_documents({
        'FIPSCode': court['fips_code'],
        'date_collected': {'$exists': True}
    })
    district_db.courts.replace_one({
        '_id': court['_id']
    }, court)
print('CIRCUIT COURT')
for court in circuit_db.courts.find():
    print(court['name'])
    court['total_count'] = circuit_db.cases.count_documents({
        'FIPSCode': court['fips_code'],
        'CaseNumber': {'$regex': '^CR1[0-4].*'}
    })
    court['collected_count'] = circuit_db.cases.count_documents({
        'FIPSCode': court['fips_code'],
        'CaseNumber': {'$regex': '^CR1[0-4].*'},
        'date_collected': {'$exists': True}
    })
    circuit_db.courts.replace_one({
        '_id': court['_id']
    }, court)
|
188024
|
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def heatmap(x, y, freq_labels=1, show_grid=True, invert_yaxis=False, **kwargs):
    color = kwargs.get('color', [1] * len(x))
    color_min, color_max = kwargs.get('color_range', (min(color), max(color)))
    size = kwargs.get('size', [1] * len(x))
    size_min, size_max = kwargs.get('size_range', (min(size), max(size)))[:2]
    size_scale = kwargs.get('size_scale', 500)
    marker = kwargs.get('marker', 's')
if 'palette' in kwargs:
palette = kwargs['palette']
n_colors = len(palette)
else:
        n_colors = 256  # Use 256 colors for the default sequential palette
        palette = sns.color_palette("Blues", n_colors)
def value_to_color(val):
if color_min == color_max:
return palette[-1]
else:
            val_position = float((val - color_min)) / (color_max - color_min)  # position of value in the input range, relative to the length of the input range
            val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
            ind = int(val_position * (n_colors - 1))  # target index in the color palette
            return palette[ind]
def value_to_size(val):
if size_min == size_max:
return 1 * size_scale
else:
            val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01  # position of value in the input range, relative to the length of the input range
            val_position = min(max(val_position, 0), 1)  # bound the position between 0 and 1
            return val_position * size_scale
    if 'x_order' in kwargs:
        x_names = list(kwargs['x_order'])
    else:
        x_names = list(set(x))
    x_to_num = {name: i for i, name in enumerate(x_names)}
    if 'y_order' in kwargs:
        y_names = list(kwargs['y_order'])
    else:
        y_names = list(set(y))
    y_to_num = {name: i for i, name in enumerate(y_names)}
    plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1)
ax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot
ax.invert_yaxis()
    kwargs_pass_on = {
        k: v for k, v in kwargs.items()
        if k not in ['color', 'palette', 'color_range', 'size', 'size_range',
                     'size_scale', 'marker', 'x_order', 'y_order',
                     'invert_yaxis', 'show_grid']
    }
    ax.scatter(
        x=[x_to_num[v] for v in x],
        y=[y_to_num[v] for v in y],
        marker=marker,
        s=[value_to_size(v) for v in size],
        c=[value_to_color(v) for v in color],
        **kwargs_pass_on
    )
if freq_labels:
ax.set_xticks([v for k,v in x_to_num.items()][::freq_labels])
ax.set_xticklabels([k for k in x_to_num][::freq_labels],
rotation=90, horizontalalignment='right',
)
ax.set_yticks([v for k,v in y_to_num.items()][::freq_labels])
ax.set_yticklabels([k for k in y_to_num][::freq_labels],
#rotation=45, horizontalalignment='right',
)
if show_grid:
ax.grid(False, 'major')
ax.grid(True, 'minor')
else:
ax.grid(False, 'major')
ax.grid(False, 'minor')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.xaxis.tick_top()
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
if invert_yaxis:
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5][::-1])
else:
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
ax.set_facecolor('#F1F1F1')
# Add color legend on the right side of the plot
if color_min < color_max:
ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot
col_x = [0]*len(palette) # Fixed x coordinate for the bars
bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars
bar_height = bar_y[1] - bar_y[0]
ax.barh(y=bar_y,
width=[5]*len(palette), # Make bars 5 units wide
left=col_x, # Make bars start at 0
height=bar_height,
color=palette,
linewidth=0
)
        ax.set_xlim(1, 2)  # Bars go from 0 to 5, so let's crop the plot somewhere in the middle
ax.grid(False) # Hide grid
ax.set_facecolor('white') # Make background white
ax.set_xticks([]) # Remove horizontal ticks
ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max
ax.yaxis.tick_right() # Show vertical ticks on the right
def corrplot(data, size_scale=500, marker='s'):
    corr = pd.melt(data.reset_index(), id_vars='index')
    corr.columns = ['x', 'y', 'value']
    heatmap(
        corr['x'], corr['y'],
        color=corr['value'], color_range=[-1, 1],
        palette=sns.diverging_palette(20, 220, n=256),
        size=corr['value'].abs(), size_range=[0, 1],
        marker=marker,
        x_order=data.columns, y_order=data.columns[::-1],
        size_scale=size_scale,
    )
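# --- Usage sketch ---
# corrplot expects a square correlation matrix (index == columns), e.g. the
# output of DataFrame.corr(); the column names below are illustrative only.
if __name__ == '__main__':
    data = pd.DataFrame(np.random.randn(100, 4), columns=['a', 'b', 'c', 'd'])
    plt.figure(figsize=(8, 8))
    corrplot(data.corr())
    plt.show()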
|
188027
|
import os
import pdb
import pickle
import logging
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from argparse import ArgumentParser
import itertools
from torch.utils.data import random_split
from src.modules.trainer import OCRTrainer
from src.utils.utils import EarlyStopping, gmkdir
from src.models.crnn import CRNN
from src.options.ss_opts import base_opts
from src.data.pickle_dataset import PickleDataset
from src.data.synth_dataset import SynthDataset, SynthCollator  # SynthDataset is used below; assumed to be exported alongside SynthCollator
from src.criterions.ctc import CustomCTCLoss
from src.utils.top_sampler import SamplingTop
from main import Learner
class LearnerSemi(Learner):
def __init__(self, model, optimizer, savepath=None, resume=False):
self.model = model
self.optimizer = optimizer
self.savepath = os.path.join(savepath, 'finetuned.ckpt')
self.cuda = torch.cuda.is_available()
self.cuda_count = torch.cuda.device_count()
if self.cuda:
self.model = self.model.cuda()
self.epoch = 0
if self.cuda_count > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
self.model = nn.DataParallel(self.model)
self.best_score = None
def freeze(self, index, boolean=False):
layer = self.get_layer_groups()[index]
for params in layer.parameters():
params.requires_grad = boolean
def freeze_all_but(self, index):
n_layers = len(self.get_layer_groups())
for i in range(n_layers):
self.freeze(i)
self.freeze(index, boolean=True)
def unfreeze(self, index):
self.freeze(index, boolean=True)
def unfreeze_all(self):
n_layers = len(self.get_layer_groups())
for i in range(n_layers):
self.unfreeze(i)
def child(self, x):
return list(x.children())
def recursive_(self, child):
if hasattr(child, 'children'):
if len(self.child(child)) != 0:
child = self.child(child)
return self.recursive_(child)
return child
def get_layer_groups(self):
children = []
for child in self.child(self.model):
children.extend(self.recursive_(child))
children = [child for child in children if list(child.parameters())]
return children
if __name__ == '__main__':
parser = ArgumentParser()
base_opts(parser)
args = parser.parse_args()
    # Loading source data
args.imgdir = 'English_consortium'
args.source_data = SynthDataset(args)
args.collate_fn = SynthCollator()
    # Loading target data and splitting
    # into train and val
args.imgdir = 'English_unannotated'
target_data = SynthDataset(args)
train_split = int(0.8*len(target_data))
val_split = len(target_data) - train_split
args.data_train, args.data_val = random_split(target_data, (train_split, val_split))
args.alphabet = """Only thewigsofrcvdampbkuq.$A-210xT5'MDL,RYHJ"ISPWENj&BC93VGFKz();#:!7U64Q8?+*ZX/%"""
args.nClasses = len(args.alphabet)
model = CRNN(args)
model = model.cuda()
args.criterion = CustomCTCLoss()
savepath = os.path.join(args.save_dir, args.name)
gmkdir(savepath)
gmkdir(args.log_dir)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# Loading specific model to get top samples
resume_file = savepath + '/' + 'best.ckpt'
print('Loading model %s'%resume_file)
checkpoint = torch.load(resume_file)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['opt_state_dict'])
# Generating top samples
args.model = model
args.imgdir = 'target_top'
finetunepath = args.path + '/' + args.imgdir
gmkdir(finetunepath)
sampler = SamplingTop(args)
sampler.get_samples(train_on_pred=args.train_on_pred,
combine_scoring=args.combine_scoring)
# Joining source and top samples
args.top_samples = SynthDataset(args)
args.data_train = torch.utils.data.ConcatDataset([args.source_data, args.top_samples])
    print('Training Data Size:{}\nVal Data Size:{}'.format(
        len(args.data_train), len(args.data_val)))
learner = LearnerSemi(args.model, optimizer, savepath=savepath, resume=args.resume)
learner.fit(args)
shutil.rmtree(finetunepath)
|
188061
|
from model import *
from config import *
import torch.optim as optim
from collections import OrderedDict
def load(path):
state_dict = torch.load(path)
state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
state_dict_rename[name] = v
#print(state_dict_rename)
#model.load_state_dict(state_dict_rename)
return state_dict_rename
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']), e_extract_layer(), nums=BATCH_SIZE).cuda()
#initialize_weights(D_E)
#D_E.base.load_state_dict(torch.load('../vgg16_feat.pth'))
#print(D_E)
D_E.load_state_dict(load('D:\WRm/checkpoints/D_Eepoch3.pkl'))
D_E = nn.DataParallel(D_E).cuda()
U = D_U().cuda()
#initialize_weights(U)
U.load_state_dict(load('D:\WRm/checkpoints/Uepoch3.pkl'))
U = nn.DataParallel(U)
#D_E.base.load_state_dict(torch.load('/home/neverupdate/Downloads/SalGAN-master/weights/vgg16_feat.pth'))
#D_E.load_state_dict(torch.load('./checkpoints/D_Eepoch3.pkl'))
#U.load_state_dict(torch.load('./checkpoints/Uepoch3.pkl'))
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE,betas=(0.5,0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
TR_sal_dirs = [ ("D:\WRM/DUTS/DUTS-TR/DUTS-TR-Image",
"D:\WRM/DUTS/DUTS-TR/DUTS-TR-Mask")]
TR_ed_dir = [("./images/train",
"./bon/train")]
TE_sal_dirs = [("D:\WRM/ECSSD (2)/ECSSD-Image",
"D:\WRM/ECSSD (2)/ECSSD-Mask")]
TE_ed_dir = [("./images/test",
"./bon/test")]
def DATA(sal_dirs,ed_dir,trainable):
S_IMG_FILES = []
S_GT_FILES = []
E_IMG_FILES = []
E_GT_FILES = []
for dir_pair in sal_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
S_IMG_FILES.extend(X)
S_GT_FILES.extend(y)
for dir_pair in ed_dir:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
E_IMG_FILES.extend(X)
E_GT_FILES.extend(y)
S_IMGS_train, S_GT_train = S_IMG_FILES, S_GT_FILES
E_IMGS_train, E_GT_train = E_IMG_FILES, E_GT_FILES
folder = DataFolder(S_IMGS_train, S_GT_train, E_IMGS_train, E_GT_train, trainable)
if trainable:
data = DataLoader(folder, batch_size=BATCH_SIZE, num_workers=2, shuffle=trainable)
else:
data = DataLoader(folder, batch_size=1, num_workers=2, shuffle=trainable)
return data
train_data = DATA(TR_sal_dirs,TR_ed_dir,trainable=True)
test_data = DATA(TE_sal_dirs,TE_ed_dir,trainable=False)
def cal_eLoss(edges, label):
    loss = 0
    w = [1, 1, 1, 1, 1, 5]
    for i in range(6):
        #print(label[i].shape)
        #print(edges[i].shape)
        loss += w[i] * F.binary_cross_entropy(edges[i], label) / 10
    return loss
def cal_s_mLoss(maps, label):
    loss = 0
    w = [1, 1, 1, 1, 1, 1]
    for i in range(6):
        loss = loss + w[i] * F.binary_cross_entropy(maps[i], label) / 6
    return loss
def cal_s_eLoss(es, label):
    loss = 0
    w = [1, 1, 1, 1, 1]
    for i in range(5):
        loss = loss + w[i] * F.binary_cross_entropy(es[i], label) / 5
    return loss
def cal_e_mLoss(e_m, label):
    loss = 0
    w = [1, 1, 1, 1, 1, 1]
    for i in range(5):
        loss = loss + w[i] * F.binary_cross_entropy(e_m[i], label) / 5
    return loss
def cal_s_e2mLoss(e_m, maps):
    loss = 0
    w = [1, 1, 1, 1, 1, 1]
    for i in range(5):
        loss = loss + w[i] * F.binary_cross_entropy(e_m[i], maps[i]) / 5
    return loss
best_eval = None
ma = 0
def main(train_data,test_data):
best_eval = None
ma = 0
for epoch in range(1, NUM_EPOCHS + 1):
sum_train_mae = 0
sum_train_loss = 0
x = 0
##train
for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(train_data):
D_E.train()
U.train()
x = x + 1
print('training start!!')
# for iter, (x_, _) in enumerate(train_data):
img = Variable(img.cuda()) # ,Variable(z_.cuda())
img_e = Variable(img_e.cuda())
sal_l = Variable(sal_l.cuda(), requires_grad=False)
sal_e = Variable(sal_e.cuda(), requires_grad=False)
ed_l = Variable(ed_l, requires_grad=False).cuda()
##########DSS#########################
######train dis
dd = True
if dd == True:
##fake
f, edges, e_s, e = D_E(img,img_e)
ff = list()
for i in range(5):
ff.append(f[i].detach())
edges_L = cal_eLoss(edges,ed_l)
e_s_L = cal_e_mLoss(e_s, sal_l)
e_L = cal_s_eLoss(e, sal_e)
#s_m_L = cal_s_mLoss(s, sal_l)
# masks, es = U(f)
# pre_ms_l = 0
# pre_es_l = 0
# ma = torch.abs(sal_l - masks[1]).mean()
# pre_m_l = F.binary_cross_entropy(masks[1], sal_l)
# for i in range(2):
# pre_ms_l += F.binary_cross_entropy(masks[1], sal_l)
# pre_es_l += F.binary_cross_entropy(es[1], sal_e)
DE_optimizer.zero_grad()
DE_l_1 = 5 * e_s_L + 10*e_L + 5*edges_L
DE_l_1.backward()
DE_optimizer.step()
uu = True
if uu == True:
masks, es = U(ff)
# mmm = masks[2].detach().cpu().numpy()
# print(mmm.shape)
# mmmmm = Image.fromarray(mmm[0,0,:,:])
# mmmmm.save('1.png')
# cv2.imshow('1.png',mmm[0,0,:,:]*255)
# cv2.waitKey()
pre_ms_l = 0
pre_es_l = 0
ma = torch.abs(sal_l - masks[2]).mean()
# print(ma)
pre_m_l = F.binary_cross_entropy(masks[2], sal_l)
for i in range(2):
pre_ms_l += F.binary_cross_entropy(masks[i], sal_l)
pre_es_l += F.binary_cross_entropy(es[i], sal_e)
U_l_1 = 50 * pre_m_l + 10 * pre_es_l + pre_ms_l
U_optimizer.zero_grad()
U_l_1.backward()
U_optimizer.step()
sum_train_mae += float(ma)
print(
"Epoch:{}\t iter:{} sum:{} \t mae:{}".format(epoch, x, len(train_data), sum_train_mae / (iter_cnt + 1)))
##########save model
# torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
torch.save(D_E.state_dict(), 'D:\WRM/checkpoints/D_Eepoch%d.pkl' % epoch)
torch.save(U.state_dict(), 'D:\WRM/checkpoints/Uepoch%d.pkl' % epoch)
print('model saved')
###############test
eval1 = 0
eval2 = 0
t_mae = 0
for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(test_data):
D_E.eval()
U.eval()
label_batch = Variable(sal_l).cuda()
img_eb = Variable(img_e).cuda()
print('val!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img.cuda()) # ,Variable(z_.cuda())
f, edges, e_s, e = D_E(img_batch,img_eb)
masks, es = U(f)
            mae_v2 = torch.abs(label_batch - masks[2]).mean().item()
# eval1 += mae_v1
eval2 += mae_v2
# m_eval1 = eval1 / (iter_cnt + 1)
m_eval2 = eval2 / (iter_cnt + 1)
print("test mae", m_eval2)
with open('results1.txt', 'a+') as f:
f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
if __name__ == '__main__':
main(train_data,test_data)
|
188107
|
import alpenglow.Getter as rs
import alpenglow as prs
class SvdppExperiment(prs.OnlineExperiment):
"""SvdppExperiment(begin_min=-0.01,begin_max=0.01,dimension=10,learning_rate=0.05,negative_rate=20,use_sigmoid=False,norm_type="exponential",gamma=0.8,user_vector_weight=0.5,history_weight=0.5)
This class implements an online version of the SVD++ model [Koren2008]_
The model is able to train on implicit data using negative sample generation, see [X.He2016]_ and the **negative_rate** parameter.
We apply a decay on the user history, the weight of the older items is smaller.
.. [Koren2008] <NAME>, “Factorization Meets the Neighborhood: A Multifaceted Collaborative Filtering Model,” Proc. 14th ACM SIGKDD Int’l Conf. Knowledge Discovery and Data Mining, ACM Press, 2008, pp. 426-434.
Parameters
----------
begin_min : double
The factors are initialized randomly, sampling each element uniformly from the interval (begin_min, begin_max).
begin_max : double
See begin_min.
dimension : int
The latent factor dimension of the factormodel.
learning_rate : double
The learning rate used in the stochastic gradient descent updates.
negative_rate : int
The number of negative samples generated after each update. Useful for implicit recommendation.
norm_type : string
Normalization variants.
gamma : double
The constant in the decay function.
user_vector_weight : double
        The user is modeled with a sum of a user vector and a combination of item vectors. The weights of the two parts can be set using these parameters.
history_weight : double
See user_vector_weight.
"""
def _config(self, top_k, seed):
model = rs.SvdppModel(**self.parameter_defaults(
begin_min=-0.01,
begin_max=0.01,
dimension=10,
use_sigmoid=False,
norm_type="exponential",
gamma=0.8,
user_vector_weight=0.5,
history_weight=0.5,
initialize_all=False
))
gradient_updater = rs.SvdppModelGradientUpdater(**self.parameter_defaults(
learning_rate=0.05,
cumulative_item_updates=False,
))
gradient_updater.set_model(model)
simple_updater = rs.SvdppModelUpdater()
simple_updater.set_model(model)
point_wise = rs.ObjectiveMSE()
gradient_computer = rs.GradientComputerPointWise()
gradient_computer.set_objective(point_wise)
gradient_computer.set_model(model)
gradient_computer.add_gradient_updater(gradient_updater)
negative_sample_generator = rs.UniformNegativeSampleGenerator(**self.parameter_defaults(
negative_rate=20,
initialize_all=False,
seed=928357823,
))
negative_sample_generator.add_updater(gradient_computer)
return (model, [negative_sample_generator, simple_updater], [])
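# --- Usage sketch (hedged) ---
# alpenglow OnlineExperiment subclasses are typically driven with a pandas
# DataFrame of timestamped (user, item) events; the column expectations and
# the exact run() signature below are assumptions based on that convention,
# not something this module defines.
#
#   import pandas as pd
#   data = pd.read_csv('ratings.csv')      # expected to hold time/user/item columns
#   experiment = SvdppExperiment(top_k=100, seed=254938879)
#   rankings = experiment.run(data, verbose=True)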
|
188132
|
import gym
import numpy as np
from rltf.agents import LoggingAgent
from rltf.memory import PGBuffer
from rltf.monitoring import Monitor
class AgentPG(LoggingAgent):
def __init__(self,
env_maker,
model,
gamma,
lam,
rollout_len,
stop_step,
vf_iters=1,
stack_frames=3,
**agent_kwargs
):
"""
Args:
      env_maker: callable. Function that takes the mode of an env and returns a new environment instance
gamma: float. Discount factor for GAE(gamma, lambda)
lam: float. Lambda value for GAE(gamma, lambda)
rollout_len: int. Number of agent steps before taking a policy gradient step
stop_step: int. Total number of agent steps
vf_iters: int. Number of value function training steps in a single epoch
"""
super().__init__(**agent_kwargs)
assert self.log_period % rollout_len == 0, "Log period must be divisible by rollout length"
if self.eval_len > 0:
assert self.eval_period % rollout_len == 0, "Eval period must be divisible by rollout length"
    if self.save_period < np.inf:
assert self.save_period % rollout_len == 0, "Save period must be divisible by rollout length"
self.env_train = Monitor(
env=env_maker('t'),
log_dir=self.model_dir,
mode='t',
log_period=None,
video_spec=self.video_period,
)
self.env_eval = Monitor(
env=env_maker('e'),
log_dir=self.model_dir,
mode='e',
log_period=self.eval_len,
video_spec=self.video_period,
eval_period=self.eval_period,
)
self.rollout_len = rollout_len
self.stop_step = stop_step
self.epochs = self.stop_step // self.rollout_len
self.vf_iters = vf_iters
self.gamma = gamma
self.lam = lam
# Get environment specs
obs_shape, obs_dtype, act_shape, act_dtype, obs_len = self._state_action_spec(stack_frames)
# Initialize the model and the experience buffer
self.model = model(obs_shape=obs_shape, act_space=self.env_train.action_space, **self.model_kwargs)
self.buffer = PGBuffer(self.rollout_len, obs_shape, obs_dtype, act_shape, act_dtype, obs_len)
def _train(self):
# Get the function that generates trajectories
run_policy = self._trajectory_generator(self.rollout_len)
for t in range(self.agent_step+1, self.epochs+1):
if self._terminate:
break
step = t * self.rollout_len
# Collect experience in the environment
run_policy()
# Train the model
self._run_train_step(t)
if step % self.log_period == 0:
self.env_train.monitor.log_stats()
# Stop and run evaluation procedure
if self.eval_len > 0 and step % self.eval_period == 0:
self._eval_agent()
# Update the agent step - corresponds to number of epochs
self.agent_step = step
# Save **after** agent step is correct and completed
if step % self.save_period == 0:
self.save()
def _trajectory_generator(self, horizon):
"""
Args:
horizon: int. Number of steps to run before yielding the trajectories
Returns:
A function which generates trajectories
"""
obs = self.reset()
def run_env():
nonlocal obs
# Clear the buffer to avoid using old data
self.buffer.reset()
for _ in range(horizon):
if self._terminate:
return
# Get an action to run and the value function estimate of this state
action, vf, logp = self._action_train(obs)
# Run action
next_obs, reward, done, info = self.env_train.step(action)
# Store the effect of the action taken upon obs
self.buffer.store(obs, action, reward, done, vf, logp)
# Reset the environment if end of episode
if done:
next_obs = self.reset()
obs = next_obs
# Store the value function for the next state. Needed to compute GAE(lambda)
if not done:
_, next_vf, _ = self._action_train(obs)
else:
next_vf = 0
# Compute GAE(gamma, lambda) and TD(lambda)
self.buffer.compute_estimates(self.gamma, self.lam, next_vf)
return run_env
def _get_feed_dict(self, batch, t):
feed_dict = {
self.model.obs_ph: batch["obs"],
self.model.act_ph: batch["act"],
self.model.adv_ph: batch["adv"],
self.model.ret_ph: batch["ret"],
self.model.old_logp_ph: batch["logp"],
self.model.pi_opt_conf.lr_ph: self.model.pi_opt_conf.lr_value(t),
self.model.vf_opt_conf.lr_ph: self.model.vf_opt_conf.lr_value(t),
}
return feed_dict
def _run_summary_op(self, t, feed_dict):
if t * self.rollout_len % self.log_period == 0:
self.summary = self.sess.run(self.summary_op, feed_dict=feed_dict)
def _run_train_step(self, t):
batch = self.buffer.get_data()
feed_dict = self._get_feed_dict(batch, t)
train_pi = self.model.ops_dict["train_pi"]
train_vf = self.model.ops_dict["train_vf"]
# Run a policy gradient step and a value function training step
self.sess.run([train_pi, train_vf], feed_dict=feed_dict)
# self.sess.run([self.model.train_op], feed_dict=feed_dict)
# Run a policy gradient step
# self.sess.run(train_pi, feed_dict=feed_dict)
# Train the value function additionally
for _ in range(self.vf_iters-1):
if self._terminate:
break
self.sess.run(train_vf, feed_dict=feed_dict)
# Run the summary op to log the changes from the update if necessary
self._run_summary_op(t, feed_dict)
def _action_train(self, state):
data = self.model.action_train_ops(self.sess, state)
action = data["action"][0]
vf = data["vf"][0]
logp = data["logp"][0]
return action, vf, logp
def _action_eval(self, state):
data = self.model.action_eval_ops(self.sess, state)
action = data["action"][0]
return action
def _save_allowed(self):
# Prevent saving if the process was terminated - state is most likely inconsistent
return not self._terminate
def _state_action_spec(self, stack_frames):
assert isinstance(self.env_train.observation_space, gym.spaces.Box)
# Get environment specs
act_shape = list(self.env_train.action_space.shape)
obs_shape = list(self.env_train.observation_space.shape)
# Get obs_shape and obs_dtype
if len(obs_shape) == 3:
assert stack_frames > 1
obs_dtype = np.uint8
obs_len = stack_frames
else:
obs_dtype = np.float32
obs_len = 1
# Get act_shape and act_dtype
if isinstance(self.env_train.action_space, gym.spaces.Box):
act_shape = list(self.env_train.action_space.shape)
act_dtype = np.float32
elif isinstance(self.env_train.action_space, gym.spaces.Discrete):
act_shape = []
act_dtype = np.uint8
else:
raise ValueError("Unsupported action space")
return obs_shape, obs_dtype, act_shape, act_dtype, obs_len
def _reset(self):
pass
def _append_log_spec(self):
return [ ( "pi_learn_rate", "f", self.model.pi_opt_conf.lr_value), ]
|
188329
|
import ast
from boa3.analyser.astanalyser import IAstAnalyser
class ConstructAnalyser(IAstAnalyser, ast.NodeTransformer):
"""
    This class is responsible for pre-processing Python constructs.
The methods with the name starting with 'visit_' are implementations of methods from the :class:`NodeVisitor` class.
These methods are used to walk through the Python abstract syntax tree.
"""
def __init__(self, ast_tree: ast.AST, log: bool = False):
super().__init__(ast_tree, log=log)
self.visit(self._tree)
@property
def tree(self) -> ast.AST:
"""
Gets the analysed abstract syntax tree
:return: the analysed ast
"""
return self._tree
def visit_Call(self, call: ast.Call) -> ast.AST:
"""
Visitor of a function call node
:param call: the python ast function call node
"""
if isinstance(call.func, ast.Attribute):
from boa3.model.builtin.builtin import Builtin
if call.func.attr == Builtin.ScriptHash.identifier:
from boa3.constants import SYS_VERSION_INFO
from boa3.model.type.type import Type
types = {
Type.int.identifier: int,
Type.str.identifier: str,
Type.bytes.identifier: bytes
}
literal: tuple = ((ast.Constant,)
if SYS_VERSION_INFO >= (3, 8)
else (ast.Num, ast.Str, ast.Bytes))
if isinstance(call.func.value, literal) and len(call.args) == 0:
value = ast.literal_eval(call.func.value)
if not isinstance(value, tuple(types.values())):
return call
elif (isinstance(call.func.value, ast.Name) # checks if is the name of a type
and call.func.value.id in types # and if the arguments is from the same type
and len(call.args) == 1
and isinstance(call.args[0], literal)):
value = ast.literal_eval(call.args[0])
if not isinstance(value, (types[call.func.value.id],)):
return call
else:
return call
from boa3.neo import to_script_hash
# value must be bytes
if isinstance(value, int):
from boa3.neo.vm.type.Integer import Integer
value = Integer(value).to_byte_array()
elif isinstance(value, str):
from boa3.neo.vm.type.String import String
value = String(value).to_bytes()
return self.parse_to_node(str(to_script_hash(value)), call)
return call
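# --- Illustration ---
# The visitor above constant-folds literal script-hash conversions at compile
# time: a ScriptHash call (identifier per Builtin.ScriptHash) on a literal
# receiver, or on a type name with a matching literal argument, is replaced by
# a node holding the precomputed hash, while non-literal forms fall through
# the early `return call` branches and are left for runtime evaluation.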
|
188333
|
from guizero import App
app = App()
app.info("Info", "This is a guizero app")
app.error("Error", "Try and keep these out your code...")
app.warn("Warning", "These are helpful to alert users")
app.display()
|
188381
|
import os
import unittest
import tempfile
import shutil
from faceutils import detect_faces, io, face_landmarks, extract_face_features, features_distance
class RecognitionTest(unittest.TestCase):
"""
Test methods in face recognition
"""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def get_features(self, image):
return extract_face_features(image, face_landmarks(image, detect_faces(image, min_score=1)[0]).shape)
def test_features(self):
image_a = io.load_image("faces/albert-einstein.jpg")
image_b = io.load_image("faces/einstein-laughing.jpg")
features_a = self.get_features(image_a)
features_b = self.get_features(image_b)
distance = features_distance(features_a, features_b)
self.assertTrue(distance < 0.6)
if __name__ == '__main__':
unittest.main()
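# Note: the 0.6 cutoff mirrors the conventional decision threshold for
# dlib-style 128-d face embeddings, where a Euclidean distance below ~0.6 is
# commonly treated as "same person"; whether faceutils uses that model under
# the hood is an assumption here.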
|
188447
|
import unittest
from types import SimpleNamespace
import pytest
from pyspark.sql import Row
from snorkel.labeling.lf.nlp import NLPLabelingFunction
from snorkel.labeling.lf.nlp_spark import (
SparkNLPLabelingFunction,
spark_nlp_labeling_function,
)
from snorkel.types import DataPoint
def has_person_mention(x: DataPoint) -> int:
person_ents = [ent for ent in x.doc.ents if ent.label_ == "PERSON"]
return 0 if len(person_ents) > 0 else -1
@pytest.mark.spark
class TestNLPLabelingFunction(unittest.TestCase):
def _run_lf(self, lf: SparkNLPLabelingFunction) -> None:
x = Row(num=8, text="The movie is really great!")
self.assertEqual(lf(x), -1)
x = Row(num=8, text="<NAME> acted well.")
self.assertEqual(lf(x), 0)
def test_nlp_labeling_function(self) -> None:
lf = SparkNLPLabelingFunction(name="my_lf", f=has_person_mention)
self._run_lf(lf)
def test_nlp_labeling_function_decorator(self) -> None:
@spark_nlp_labeling_function()
def has_person_mention(x: DataPoint) -> int:
person_ents = [ent for ent in x.doc.ents if ent.label_ == "PERSON"]
return 0 if len(person_ents) > 0 else -1
self.assertIsInstance(has_person_mention, SparkNLPLabelingFunction)
self.assertEqual(has_person_mention.name, "has_person_mention")
self._run_lf(has_person_mention)
def test_spark_nlp_labeling_function_with_nlp_labeling_function(self) -> None:
# Do they have separate _nlp_configs?
lf = NLPLabelingFunction(name="my_lf", f=has_person_mention)
lf_spark = SparkNLPLabelingFunction(name="my_lf_spark", f=has_person_mention)
self.assertEqual(lf(SimpleNamespace(num=8, text="<NAME> acted well.")), 0)
self._run_lf(lf_spark)
|
188500
|
from django import dispatch
presence_changed = dispatch.Signal(
providing_args=["room", "added", "removed", "bulk_change"]
)
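# Note: `providing_args` was purely documentary; Django 3.0 deprecated it and
# Django 4.0 removed it, so on modern Django the equivalent declaration is:
#
#   presence_changed = dispatch.Signal()  # sends room, added, removed, bulk_change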
|
188506
|
import logging
from typing import List, Dict, Set, Union, cast, Type
import pandas as pd
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.NucleotideMutationTranslater import NucleotideMutationTranslater
from genomics_data_index.storage.model.QueryFeature import QueryFeature
from genomics_data_index.storage.model.QueryFeatureHGVS import QueryFeatureHGVS
from genomics_data_index.storage.model.QueryFeatureHGVSGN import QueryFeatureHGVSGN
from genomics_data_index.storage.model.QueryFeatureMLST import QueryFeatureMLST
from genomics_data_index.storage.model.QueryFeatureMutation import QueryFeatureMutation
from genomics_data_index.storage.model.QueryFeatureMutationSPDI import QueryFeatureMutationSPDI
from genomics_data_index.storage.model.db import NucleotideVariantsSamples, Reference, ReferenceSequence, MLSTScheme, \
SampleMLSTAlleles, MLSTAllelesSamples, Sample
from genomics_data_index.storage.model.db import SampleNucleotideVariation
from genomics_data_index.storage.service import DatabaseConnection
from genomics_data_index.storage.service import SQLQueryInBatcherDict, SQLQueryInBatcherList
logger = logging.getLogger(__name__)
class FeatureExplodeUnknownError(Exception):
def __init__(self, msg: str):
super().__init__(msg)
class SampleService:
def __init__(self, database_connection: DatabaseConnection, sql_select_limit: int):
self._connection = database_connection
self._sql_select_limit = sql_select_limit
def get_samples_with_variants(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given reference genome name.
:reference_name: The reference genome name.
:return: A list of Samples with variants with respect to the reference genome name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def feature_explode_unknown(self, feature: QueryFeature) -> List[QueryFeature]:
if isinstance(feature, QueryFeatureHGVSGN):
features_spdi = self.find_features_spdi_for_hgvsgn(feature)
if len(features_spdi) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVSGN but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for feature in features_spdi:
unknown_features.extend(feature.to_unknown_explode())
return unknown_features
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c == feature.id) \
.all()
elif feature.is_protein():
variants_hgvs = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p == feature.id) \
.all()
else:
raise Exception(f'feature=[{feature}] is neither nucleotide or protein')
if len(variants_hgvs) == 0:
raise FeatureExplodeUnknownError(f'feature={feature} is of type HGVS but the corresponding SPDI '
f'feature does not exist in the database. Cannot convert to unknown '
f'SPDI representation.')
else:
unknown_features = []
for variants_sample_obj in variants_hgvs:
unknown_features.extend(QueryFeatureMutationSPDI(variants_sample_obj.spdi).to_unknown_explode())
return unknown_features
else:
return feature.to_unknown_explode()
def find_features_spdi_for_hgvsgn(self, feature: QueryFeatureHGVSGN) -> List[QueryFeatureMutationSPDI]:
if not isinstance(feature, QueryFeatureHGVSGN):
raise Exception(f'Cannot handle feature={feature}. Not of type {QueryFeatureHGVSGN.__name__}')
query = self._connection.get_session().query(NucleotideVariantsSamples).filter(
NucleotideVariantsSamples.sequence == feature.sequence)
if feature.has_gene():
query = query.filter(NucleotideVariantsSamples.annotation_gene_name == feature.gene)
if feature.is_nucleotide():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_c == feature.variant)
elif feature.is_protein():
query = query.filter(NucleotideVariantsSamples.annotation_hgvs_p == feature.variant)
else:
raise Exception(f'feature={feature} is neither protein nor nucleotide')
return [QueryFeatureMutationSPDI(s.spdi) for s in query.all()]
def get_samples_with_mlst_alleles(self, scheme_name: str) -> List[Sample]:
"""
Gets a list of all samples that have MLST alleles associated with the given scheme name.
:scheme_name: The scheme name.
:return: A list of Samples with MLST alleles with respect to the scheme name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_mlst_alleles) \
.join(SampleMLSTAlleles.scheme) \
.filter(MLSTScheme.name == scheme_name) \
.all()
return samples
def get_samples_with_variants_on_sequence(self, sequence_name: str) -> List[Sample]:
"""
Gets a list of all samples that have variants associated with the given sequence name.
:sequence_name: The sequence name.
:return: A list of Samples with variants with respect to the sequence name, empty list of no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.join(Reference.sequences) \
.filter(ReferenceSequence.sequence_name == sequence_name) \
.all()
return samples
def get_samples_associated_with_reference(self, reference_name: str) -> List[Sample]:
"""
Gets a list of all samples associated with a reference name.
:reference_name: The reference name.
:return: A list of Samples associated with the reference name or an empty list if no Samples.
"""
samples = self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()
return samples
def get_samples_set_associated_with_reference(self, reference_name: str) -> SampleSet:
"""
Gets a list of all samples associated with a reference name.
:reference_name: The reference name.
:return: A list of Samples associated with the reference name or an empty list if no Samples.
"""
sample_ids = [i for i, in self._connection.get_session().query(Sample.id) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.all()]
return SampleSet(sample_ids=sample_ids)
def create_dataframe_from_sample_set(self, present_set: SampleSet,
absent_set: SampleSet,
unknown_set: SampleSet,
queries_expression: str) -> pd.DataFrame:
sample_sets_status_list = [(present_set, 'Present'), (absent_set, 'Absent'), (unknown_set, 'Unknown')]
data = []
for sample_status in sample_sets_status_list:
sample_set = sample_status[0]
status = sample_status[1]
if not sample_set.is_empty():
samples = self.find_samples_by_ids(sample_set)
for sample in samples:
data.append([queries_expression, sample.name, sample.id, status])
return pd.DataFrame(data=data, columns=['Query', 'Sample Name', 'Sample ID', 'Status'])
def count_samples_associated_with_reference(self, reference_name: str) -> int:
return self._connection.get_session().query(Sample) \
.join(Sample.sample_nucleotide_variation) \
.join(SampleNucleotideVariation.reference) \
.filter(Reference.name == reference_name) \
.count()
def count_samples_associated_with_mlst_scheme(self, scheme_name: str) -> int:
return len(self.get_samples_with_mlst_alleles(scheme_name))
def get_samples(self) -> List[Sample]:
return self._connection.get_session().query(Sample).all()
def count_samples(self) -> int:
return self._connection.get_session().query(Sample).count()
def get_all_sample_ids(self) -> SampleSet:
ids_list = [id for id, in self._connection.get_session().query(Sample.id).all()]
return SampleSet(ids_list)
def get_existing_samples_by_names(self, sample_names: List[str]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
def which_exists(self, sample_names: List[str]) -> List[str]:
"""
Returns which of the given samples exist in the database.
:param sample_names: The list of sample names.
:return: A list of those passed sample names that exist in the database.
"""
samples = self._connection.get_session().query(Sample) \
.filter(Sample.name.in_(sample_names)) \
.all()
return [sample.name for sample in samples]
def get_sample(self, sample_name: str) -> Sample:
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name) \
.one()
def exists(self, sample_name: str):
return self._connection.get_session().query(Sample) \
.filter(Sample.name == sample_name).count() > 0
def find_samples_by_ids(self, sample_ids: Union[List[int], SampleSet]) -> List[Sample]:
if isinstance(sample_ids, SampleSet):
sample_ids = list(sample_ids)
query_batcher = SQLQueryInBatcherList(in_data=sample_ids, batch_size=self._sql_select_limit)
def handle_batch(sample_ids_batch: List[int]) -> List[Sample]:
return self._connection.get_session().query(Sample) \
.filter(Sample.id.in_(sample_ids_batch)) \
.all()
return query_batcher.process(handle_batch)
def get_variants_samples_by_variation_features(self, features: List[QueryFeatureMutation]) -> Dict[
str, NucleotideVariantsSamples]:
standardized_features_to_input_feature = {}
standardized_features_ids = set()
standardized_feature_hgvs_c_ids = set()
standardized_feature_hgvs_p_ids = set()
for feature in features:
if isinstance(feature, QueryFeatureMutationSPDI):
dbf = NucleotideMutationTranslater.to_db_feature(feature)
if dbf.id in standardized_features_to_input_feature:
standardized_features_to_input_feature[dbf.id].append(feature.id)
else:
standardized_features_to_input_feature[dbf.id] = [feature.id]
standardized_features_ids.add(dbf.id)
elif isinstance(feature, QueryFeatureHGVSGN):
logger.warning(f'feature=[{feature}] is a QueryFeatureHGVSGN and I do not handle it here.')
elif isinstance(feature, QueryFeatureHGVS):
if feature.is_nucleotide():
standardized_feature_hgvs_c_ids.add(feature.id)
elif feature.is_protein():
standardized_feature_hgvs_p_ids.add(feature.id)
else:
raise Exception(f'feature=[{feature}] is neither nucleotide or protein')
else:
raise Exception(f'Invalid type for feature=[{feature}]. '
f'Must be either {QueryFeatureMutationSPDI.__class__.__name__} or '
f'{QueryFeatureHGVS.__class__.__name__}')
if len(standardized_features_ids) > 0:
variants_spdi = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._spdi.in_(standardized_features_ids)) \
.all()
else:
variants_spdi = []
if len(standardized_feature_hgvs_c_ids) > 0:
variants_hgvs_c = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_c.in_(standardized_feature_hgvs_c_ids)) \
.all()
else:
variants_hgvs_c = []
if len(standardized_feature_hgvs_p_ids) > 0:
variants_hgvs_p = self._connection.get_session().query(NucleotideVariantsSamples) \
.filter(NucleotideVariantsSamples._id_hgvs_p.in_(standardized_feature_hgvs_p_ids)) \
.all()
else:
variants_hgvs_p = []
# Map back unstandardized IDs to the actual variant object
# Use this because some features can have multiple identifiers for the same feature
# (e.g., ref:10:A:T and ref:10:1:T). I want to make sure I map each passed id to the
# same object (that is, in this example, I want to return a dictionary with two keys, one for each ID)
unstandardized_variants = {}
for v in variants_spdi:
for vid in standardized_features_to_input_feature[v.spdi]:
unstandardized_variants[vid] = v
unstandardized_variants.update({v.id_hgvs_c: v for v in variants_hgvs_c})
unstandardized_variants.update({v.id_hgvs_p: v for v in variants_hgvs_p})
return unstandardized_variants
def _get_mlst_samples_by_mlst_features(self, features: List[QueryFeatureMLST]) -> List[MLSTAllelesSamples]:
feature_ids = list({f.id_no_prefix for f in features})
mlst_alleles = self._connection.get_session().query(MLSTAllelesSamples) \
.filter(MLSTAllelesSamples._sla.in_(feature_ids)) \
.all()
return mlst_alleles
def _get_feature_type(self, features: List[QueryFeature]) -> Type[QueryFeature]:
feature_types = {f.__class__ for f in features}
if len(feature_types) != 1:
raise Exception(f'Should only be one feature type but instead got: {feature_types}.')
else:
return feature_types.pop()
def find_unknown_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
unknown_to_features_dict = {}
unknown_features = []
for feature in features:
try:
unknown_features_exploded = self.feature_explode_unknown(feature)
unknown_features.extend(unknown_features_exploded)
for unknown_feature in unknown_features_exploded:
unknown_to_features_dict[unknown_feature.id] = feature
            except FeatureExplodeUnknownError:
logger.warning(
f'Could not map feature={feature} to a set of unknown features. Will assume no unknowns exist.')
if len(unknown_features) > 0:
unknown_features_sets = self.find_sample_sets_by_features(unknown_features)
else:
            unknown_features_sets = {}
features_to_unknown_sample_sets = {}
for uid in unknown_features_sets:
fid = unknown_to_features_dict[uid].id
sample_set = unknown_features_sets[uid]
            # If we've already seen a sample set for this feature, merge the unknown
            # sample sets together. This can occur if, e.g., we have a large deletion and are
            # iterating over each base in the deletion in turn (e.g., 'ref:10:ATT:A' gets converted
            # to ['ref:10:A:?', 'ref:11:T:?', 'ref:12:T:?']); we need to merge the unknown sample
            # results for each of these features in turn.
if fid in features_to_unknown_sample_sets:
previous_sample_set = features_to_unknown_sample_sets[fid]
features_to_unknown_sample_sets[fid] = previous_sample_set.union(sample_set)
else:
features_to_unknown_sample_sets[fid] = sample_set
return features_to_unknown_sample_sets
def find_sample_sets_by_features(self, features: List[QueryFeature]) -> Dict[str, SampleSet]:
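        """
        Given a list of features (all of the same type), finds the samples containing each feature.
        :param features: The features to search by.
        :return: A dictionary mapping each feature ID to a SampleSet of the matching samples.
        """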
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureHGVSGN):
# In this case where I'm querying by gene name, first convert to SPDI features before lookup
# TODO: it's not the most efficient to do this as a loop, but it's easier to implement right now
hgvs_gn_id_to_sampleset = dict()
for feature in features:
feature = cast(QueryFeatureHGVSGN, feature)
features_spdi = self.find_features_spdi_for_hgvsgn(feature)
variants_dict = self.get_variants_samples_by_variation_features(features_spdi)
variants_nuc_variants_samples = list(variants_dict.values())
if len(variants_nuc_variants_samples) == 0:
samples_union = SampleSet.create_empty()
else:
first_nuc_variant_samples = variants_nuc_variants_samples.pop()
samples_union = first_nuc_variant_samples.sample_ids
# Handle remaining, if any
for nuc_variant_samples in variants_nuc_variants_samples:
samples_union = samples_union.union(nuc_variant_samples.sample_ids)
hgvs_gn_id_to_sampleset[feature.id] = samples_union
return hgvs_gn_id_to_sampleset
elif issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
            return {fid: variants_dict[fid].sample_ids for fid in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
return {a.query_id: a.sample_ids for a in mlst_alleles}
else:
raise Exception(f'Invalid feature type {feature_type}')
def find_samples_by_features(self, features: List[QueryFeature]) -> Dict[str, List[Sample]]:
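        """
        Given a list of features (all of the same type), finds the samples containing each feature.
        :param features: The features to search by.
        :return: A dictionary mapping each feature ID to a list of the matching Sample objects.
        """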
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
            return {fid: self.find_samples_by_ids(variants_dict[fid].sample_ids) for fid in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
return {a.query_id: self.find_samples_by_ids(a.sample_ids) for a in mlst_alleles}
else:
raise Exception(f'Invalid feature type {feature_type}')
    def count_samples_by_features(self, features: List[QueryFeature]) -> Dict[str, int]:
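        """
        Given a list of features (all of the same type), counts the samples containing each feature.
        :param features: The features to search by.
        :return: A dictionary mapping each feature ID to the number of matching samples
                 (0 for MLST features with no matches).
        """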
feature_type = self._get_feature_type(features)
if issubclass(feature_type, QueryFeatureMutation):
features = cast(List[QueryFeatureMutation], features)
variants_dict = self.get_variants_samples_by_variation_features(features)
            return {fid: len(variants_dict[fid].sample_ids) for fid in variants_dict}
elif issubclass(feature_type, QueryFeatureMLST):
features = cast(List[QueryFeatureMLST], features)
mlst_alleles = self._get_mlst_samples_by_mlst_features(features)
allele_id_to_count = {a.query_id: len(a.sample_ids) for a in mlst_alleles}
for f in features:
if f.id not in allele_id_to_count:
allele_id_to_count[f.id] = 0
return allele_id_to_count
else:
raise Exception(f'Invalid feature type {feature_type}')
def find_sample_name_ids(self, sample_names: Set[str]) -> Dict[str, int]:
"""
        Given a set of sample names, returns a dictionary mapping each sample name to its sample ID.
:param sample_names: The sample names to search.
:return: A dictionary linking the sample names to IDs.
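
        Example (illustrative; the sample names and returned IDs below are hypothetical)::

            name_ids = service.find_sample_name_ids({'SampleA', 'SampleB'})
            # e.g. {'SampleA': 1, 'SampleB': 2}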
"""
query_batcher = SQLQueryInBatcherDict(in_data=list(sample_names), batch_size=self._sql_select_limit)
def handle_batch(sample_names_batch: List[str]) -> Dict[str, int]:
sample_tuples = self._connection.get_session().query(Sample.name, Sample.id) \
.filter(Sample.name.in_(sample_names_batch)) \
.all()
return dict(sample_tuples)
return query_batcher.process(handle_batch)
def get_sample_set_by_names(self, sample_names: Union[List[str], Set[str]],
ignore_not_found: bool = False) -> SampleSet:
"""
Given a collection of sample names, get a SampleSet of the corresponding IDs.
:param sample_names: The names to convert to an ID set.
        :param ignore_not_found: Whether to ignore sample names that were not found.
:return: A SampleSet with all the corresponding samples by the passed names. If ignore_not_found is false,
raises an exception if some sample names have no ids.
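
        Example (illustrative; the sample names are hypothetical)::

            sample_set = service.get_sample_set_by_names({'SampleA', 'SampleB'},
                                                         ignore_not_found=True)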
"""
if isinstance(sample_names, list):
sample_names = set(sample_names)
elif not isinstance(sample_names, set):
raise Exception(f'Invalid type=[{type(sample_names)}] for passed sample_names. Must be list or set.')
sample_ids_tuples = self._connection.get_session().query(Sample.id) \
.filter(Sample.name.in_(sample_names)) \
.all()
sample_ids = {i for i, in sample_ids_tuples}
sample_set = SampleSet(sample_ids=sample_ids)
if ignore_not_found or len(sample_names) == len(sample_set):
return sample_set
else:
# Find matching sample names to ids we did find for a nicer error message
found_sample_names = {s.name for s in self.find_samples_by_ids(sample_set)}
names_not_found = sample_names - found_sample_names
if len(names_not_found) > 10:
small_not_found = list(names_not_found)[:10]
msg = f'[{", ".join(small_not_found)}, ...]'
else:
msg = f'{names_not_found}'
raise Exception(f'Did not find an equal number of sample names and ids. '
f'Number sample_names={len(sample_names)}. Number returned sample_ids={len(sample_ids)}. '
f'Sample names with missing ids {msg}')