Dataset schema (column: dtype, observed range):
blob_id: string, length 40
directory_id: string, length 40
path: string, length 4 to 721
content_id: string, length 40
detected_licenses: list, length 0 to 57
license_type: string, 2 classes
repo_name: string, length 5 to 91
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable)
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable)
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool
is_generated: bool
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3 to 113
content: string, length 6 to 10.2M

blob_id: 70e5892f21915fa12f48386313254ed1b19228ce | directory_id: 002d925a46fef6867c7092935a5a4113a11cf0c5 | content_id: f014b4f2a4614f320b4aed3be447869eecbe6c70
path: /care/facility/models/mixins/permissions/patient.py | filename: patient.py | extension: py | length_bytes: 8,692
repo_name: coronasafe/care | branch_name: refs/heads/master | snapshot_id: ba74c06c6486e8cd3c11e0f8b3d948e99d304746 | revision_id: c000eea7f1c79a37b0fa53eba09696cd95122202
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T12:52:08.181541 | revision_date: 2023-08-29T13:43:33 | committer_date: 2023-08-29T13:43:33
github_id: 247,995,671 | star_events_count: 216 | fork_events_count: 218
gha_event_created_at: 2023-09-13T14:52:59 | gha_created_at: 2020-03-17T14:48:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from care.facility.models import Facility, User
from care.facility.models.mixins.permissions.base import BasePermissionMixin
class PatientPermissionMixin(BasePermissionMixin):
@staticmethod
def has_write_permission(request):
if request.user.asset:
return False
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
return (
request.user.is_superuser
or request.user.verified
and request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
)
def has_object_read_permission(self, request):
doctor_allowed = False
if self.last_consultation:
doctor_allowed = (
self.last_consultation.assigned_to == request.user
or request.user == self.assigned_to
)
return request.user.is_superuser or (
(hasattr(self, "created_by") and request.user == self.created_by)
or (
self.facility
and request.user in self.facility.users.all()
or doctor_allowed
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
and (
request.user.district == self.district
or (
self.facility
and request.user.district == self.facility.district
)
)
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
and (
request.user.state == self.state
or (self.facility and request.user.state == self.facility.state)
)
)
)
def has_object_write_permission(self, request):
if request.user.asset:
return False
doctor_allowed = False
if self.last_consultation:
doctor_allowed = (
self.last_consultation.assigned_to == request.user
or request.user == self.assigned_to
)
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
return request.user.is_superuser or (
(hasattr(self, "created_by") and request.user == self.created_by)
or (doctor_allowed)
# or (self.facility and request.user in self.facility.users.all())
or (self.facility and self.facility == request.user.home_facility)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
and (
request.user.district == self.district
or (
self.facility
and request.user.district == self.facility.district
)
)
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
and (
request.user.state == self.state
or (self.facility and request.user.state == self.facility.state)
)
)
)
def has_object_update_permission(self, request):
if request.user.asset:
return False
doctor_allowed = False
if self.last_consultation:
doctor_allowed = (
self.last_consultation.assigned_to == request.user
or request.user == self.assigned_to
)
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
return (
request.user.is_superuser
or (hasattr(self, "created_by") and request.user == self.created_by)
or (self.facility and self.facility == request.user.home_facility)
or (doctor_allowed)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
and (
request.user.district == self.district
or (
self.facility
and request.user.district == self.facility.district
)
)
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
and (
request.user.state == self.state
or (self.facility and request.user.state == self.facility.state)
)
)
)
def has_object_icmr_sample_permission(self, request):
return self.has_object_read_permission(request)
def has_object_transfer_permission(self, request):
if request.user.asset:
return False
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
new_facility = Facility.objects.filter(
id=request.data.get("facility", None)
).first()
return self.has_object_update_permission(request) or (
new_facility and request.user in new_facility.users.all()
)
class PatientRelatedPermissionMixin(BasePermissionMixin):
@staticmethod
def has_write_permission(request):
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
return (
request.user.is_superuser
or request.user.verified
and request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
)
def has_object_read_permission(self, request):
return (
request.user.is_superuser
or (
self.patient.facility
and request.user in self.patient.facility.users.all()
)
or (
self.assigned_to == request.user
or request.user == self.patient.assigned_to
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
and (
self.patient.facility
and request.user.district == self.patient.facility.district
)
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
and (
self.patient.facility
and request.user.state == self.patient.facility.state
)
)
)
def has_object_update_permission(self, request):
if (
request.user.user_type == User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]
or request.user.user_type == User.TYPE_VALUE_MAP["StaffReadOnly"]
):
return False
return (
request.user.is_superuser
or (
self.patient.facility
and self.patient.facility == request.user.home_facility
)
or (
self.assigned_to == request.user
or request.user == self.patient.assigned_to
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
and (
self.patient.facility
and request.user.district == self.patient.facility.district
)
)
or (
request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
and (
self.patient.facility
and request.user.state == self.patient.facility.state
)
)
)
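# Illustrative note (not from this file): mixins like these follow the dry-rest-permissions
# convention, where a model inherits the mixin and a DRF view's permission class calls
# has_write_permission / has_object_read_permission / has_object_write_permission on it.
# The model and viewset names below are assumptions, sketched only to show the wiring:
#
# class PatientRegistration(PatientPermissionMixin, models.Model):
#     ...
#
# class PatientViewSet(viewsets.ModelViewSet):
#     permission_classes = (DRYPermissions,)
#     queryset = PatientRegistration.objects.all()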

blob_id: e2626e6e6231ceca0bfb056ef905d763f2c368ca | directory_id: ef1def58b933921ccf31bece9fc6eb5f7ffb9a18 | content_id: 55fb924e98a2f65458e39c35bfdcfbb9c1eaac81
path: /tensorhive/core/utils/mailer.py | filename: mailer.py | extension: py | length_bytes: 2,621
repo_name: roscisz/TensorHive | branch_name: refs/heads/master | snapshot_id: 4b33acd727e0b294a4a12af972c471e1254136aa | revision_id: 5b50245d285618044a9a71c06ea5361a48ad4acb
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-03-10T05:09:08.874394 | revision_date: 2022-02-02T11:08:21 | committer_date: 2022-02-02T11:08:21
github_id: 98,513,283 | star_events_count: 153 | fork_events_count: 26
gha_event_created_at: 2023-03-01T02:26:54 | gha_created_at: 2017-07-27T08:37:35 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Union, Dict, Any, Optional, List
import smtplib
from smtplib import SMTPException
import os
import logging
log = logging.getLogger(__name__)
class Message:
'''
Represents an email message.
Allows sending a copy to multiple recipients.
Send using the Mailer class.
'''
def __init__(self, author: str, to: Union[str, List[str]], subject: str, body: str):
msg = MIMEMultipart()
msg['From'] = author
msg['To'] = ', '.join(to) if isinstance(to, list) else to
msg['Subject'] = subject
msg.attach(MIMEText(body or '', 'html'))
self.msg = msg
@property
def author(self):
return self.msg['From']
@property
def recipients(self):
return self.msg['To']
@property
def subject(self):
return self.msg['Subject']
@property
def body(self):
return self.msg.as_string()
def __str__(self):
return '''
From: {}
To: {}
Subject: {}
Body: {}
'''.format(self.author, self.recipients, self.subject, self.body)
class MessageBodyTemplater:
def __init__(self, template: str):
self.template = template
def fill_in(self, data: Dict[str, Any]) -> str:
return self.template.format(
gpus=data['GPUS'],
intruder_username=data['INTRUDER_USERNAME'],
intruder_email=data['INTRUDER_EMAIL'],
owners=data['OWNERS']
)
class Mailer:
def __init__(self, server: str, port: int) -> None:
self.smtp_server = server
self.smtp_port = port
self.server = None # type: smtplib.SMTP
def send(self, message: Message) -> None:
assert self.server, 'Must call connect() first!'
assert message.author and message.recipients and message.body, 'Incomplete email body: {}'.format(message)
try:
self.server.sendmail(message.author, message.recipients, message.body)
except SMTPException as e:
log.error('Error while sending email: {}'.format(e))
def connect(self, login: str, password: str) -> None:
# assert login and password, 'Login and password must not be None!'
# assert self.smtp_server and self.smtp_port, 'SMTP server and port must not be None!'
self.server = smtplib.SMTP(self.smtp_server, self.smtp_port)
self.server.starttls()
self.server.login(login, password)
def disconnect(self) -> None:
self.server.close()
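# Minimal usage sketch (illustrative; the SMTP host, credentials, and addresses below are
# placeholders, not values from TensorHive). Kept as a comment so importing this module
# stays side-effect free:
#
# mailer = Mailer(server="smtp.example.com", port=587)
# mailer.connect(login="tensorhive@example.com", password="app-password")
# message = Message(author="tensorhive@example.com", to=["admin@example.com"],
#                   subject="TensorHive notification", body="<p>GPU reservation reminder</p>")
# mailer.send(message)
# mailer.disconnect()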

blob_id: 756d281e54cce80802b6d1c161167ca17a138343 | directory_id: 026b4b96a9244ff3ad347294e5bbd34c90d817a3 | content_id: f15d2a8ab65859d409779dd5062b51a5ba9469ba
path: /eeauditor/auditors/oci/OCI_VCN_NetworkSecurityGroup_Auditor.py | filename: OCI_VCN_NetworkSecurityGroup_Auditor.py | extension: py | length_bytes: 26,454
repo_name: jonrau1/ElectricEye | branch_name: refs/heads/master | snapshot_id: 954e57c14c46df31d17fd556f13d8bd92de5bcd1 | revision_id: 5156da4b9fe0a66dda3e236a4090ba2cf737c6a5
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-28T08:29:22.315687 | revision_date: 2023-06-27T00:27:26 | committer_date: 2023-06-27T00:27:26
github_id: 238,931,093 | star_events_count: 784 | fork_events_count: 103
gha_event_created_at: 2023-09-11T03:39:48 | gha_created_at: 2020-02-07T13:32:16 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import os
import oci
from oci.config import validate_config
import datetime
import base64
import json
from check_register import CheckRegister
registry = CheckRegister()
# Filename of the NSG Auditor config JSON file
dirPath = os.path.dirname(os.path.realpath(__file__))
configFile = f"{dirPath}/electriceye_oci_vcn_nsg_auditor_config.json"
def process_response(responseObject):
"""
Receives an OCI Python SDK `Response` type (differs by service) and returns a JSON object
"""
payload = json.loads(
str(
responseObject
)
)
return payload
def get_oci_network_security_groups(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):
response = cache.get("get_oci_network_security_groups")
if response:
return response
# Create & Validate OCI Creds - do this after cache check to avoid doing it a lot
config = {
"tenancy": ociTenancyId,
"user": ociUserId,
"region": ociRegionName,
"fingerprint": ociUserApiKeyFingerprint,
"key_file": os.environ["OCI_PEM_FILE_PATH"],
}
validate_config(config)
vncClient = oci.core.VirtualNetworkClient(config)
extendedNsgs = []
for compartment in ociCompartments:
for nsg in vncClient.list_network_security_groups(compartment_id=compartment).data:
# The NetworkSecurityGroup response object only contains information about the NSG itself, not the rules
nsg = process_response(
nsg
)
nsgId = nsg["id"]
# The rules come from a separate API call to ListNetworkSecurityGroupSecurityRules - they will be added to a "network_security_group_security_rules"
# list within the rest of the NetworkSecurityGroup response object - not how it comes from OCI but is better suited for what we need
rules = process_response(
vncClient.list_network_security_group_security_rules(
network_security_group_id=nsgId
).data
)
nsg["network_security_group_security_rules"] = rules
extendedNsgs.append(nsg)
cache["get_oci_network_security_groups"] = extendedNsgs
return cache["get_oci_network_security_groups"]
@registry.register_check("oci.vcn.nsg")
def oci_vcn_security_list_all_open_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):
""""[OCI.NetworkSecurityGroup.1] Virtual Cloud Network Network Security Groups should not allow unrestricted access to all ports and protocols"""
# ISO Time
iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()
for nsg in get_oci_network_security_groups(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):
# B64 encode all of the details for the Asset
assetJson = json.dumps(nsg, default=str).encode("utf-8")
assetB64 = base64.b64encode(assetJson)
compartmentId = nsg["compartment_id"]
nsgName = nsg["display_name"]
nsgId = nsg["id"]
vcnId = nsg["vcn_id"]
lifecycleState = nsg["lifecycle_state"]
createdAt = nsg["time_created"]
# Create a list comprehension to scope down the amount of rules that need to be looked at
allowAllCheck = [
rule for rule in nsg["network_security_group_security_rules"]
if rule.get("protocol") == "all"
and rule.get("source") == "0.0.0.0/0"
]
# If the list has an entry that means there is at least one rule that allows all ports & protocols
if allowAllCheck:
finding = {
"SchemaVersion": "2018-10-08",
"Id": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/oci-vcn-nsgs-all-open-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/oci-vcn-nsgs-all-open-check",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "CRITICAL"},
"Confidence": 99,
"Title": "[OCI.NetworkSecurityGroup.1] Virtual Cloud Network Network Security Groups should not allow unrestricted access to all ports and protocols",
"Description": f"Virtual Cloud Network Network Security Group {nsgName} for VCN {vcnId} in Compartment {compartmentId} in {ociRegionName} contains a rule that allows unrestricted access to all ports and protocols. Network security groups (NSGs) act as a virtual firewall for your compute instances and other kinds of resources. An NSG consists of a set of ingress and egress security rules that apply only to a set of VNICs of your choice in a single VCN (for example: all the compute instances that act as web servers in the web tier of a multi-tier application in your VCN). NSG security rules function the same as security list rules. However, for an NSG security rule's source (for ingress rules) or destination (for egress rules), you can specify an NSG instead of a CIDR. This means you can easily write security rules to control traffic between two NSGs in the same VCN, or traffic within a single NSG. See Parts of a Security Rule. Allowing unfettered access removes an important part of a cloud security defense-in-depth and makes it easier for adversaries to perform recon on your assets and potentially gain unauthorized access where no other network-based controls exist. Your security list should still be audited to ensure any other rules are compliant with organizational or regulatory requirements. Additionally, audit that other network security controls such as Security Lists, Web Application Firewalls, Network Firewalls, and other host- and network-based appliances and services are configured to mitigate the risk posed by this Security List. Refer to the remediation instructions if this configuration is not intended.",
"Remediation": {
"Recommendation": {
"Text": "For more information on building and modifying Security Rules (for NSGs and SLs) refer to the Security Rules section of the Oracle Cloud Infrastructure Documentation for Networks.",
"Url": "https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/securityrules.htm#sec_rules_parts",
}
},
"ProductFields": {
"ProductName": "ElectricEye",
"Provider": "OCI",
"ProviderType": "CSP",
"ProviderAccountId": ociTenancyId,
"AssetRegion": ociRegionName,
"AssetDetails": assetB64,
"AssetClass": "Networking",
"AssetService": "Oracle Cloud Virtual Cloud Network",
"AssetComponent": "Network Security Group"
},
"Resources": [
{
"Type": "OciVcnSecurityList",
"Id": nsgId,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"TenancyId": ociTenancyId,
"CompartmentId": compartmentId,
"Region": ociRegionName,
"Name": nsgName,
"Id": nsgId,
"VirtualCloudNetworkId": vcnId,
"CreatedAt": createdAt,
"LifecycleState": lifecycleState
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF V1.1 PR.AC-3",
"NIST SP 800-53 Rev. 4 AC-1",
"NIST SP 800-53 Rev. 4 AC-17",
"NIST SP 800-53 Rev. 4 AC-19",
"NIST SP 800-53 Rev. 4 AC-20",
"NIST SP 800-53 Rev. 4 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
]
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/oci-vcn-nsgs-all-open-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/oci-vcn-nsgs-all-open-check",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[OCI.NetworkSecurityGroup.1] Virtual Cloud Network Network Security Groups should not allow unrestricted access to all ports and protocols",
"Description": f"Virtual Cloud Network Network Security Group {nsgName} for VCN {vcnId} in Compartment {compartmentId} in {ociRegionName} does not contain a rule that allows unrestricted access to all ports and protocols.",
"Remediation": {
"Recommendation": {
"Text": "For more information on building and modifying Security Rules (for NSGs and SLs) refer to the Security Rules section of the Oracle Cloud Infrastructure Documentation for Networks.",
"Url": "https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/securityrules.htm#sec_rules_parts",
}
},
"ProductFields": {
"ProductName": "ElectricEye",
"Provider": "OCI",
"ProviderType": "CSP",
"ProviderAccountId": ociTenancyId,
"AssetRegion": ociRegionName,
"AssetDetails": assetB64,
"AssetClass": "Networking",
"AssetService": "Oracle Cloud Virtual Cloud Network",
"AssetComponent": "Network Security Group"
},
"Resources": [
{
"Type": "OciVcnSecurityList",
"Id": nsgId,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"TenancyId": ociTenancyId,
"CompartmentId": compartmentId,
"Region": ociRegionName,
"Name": nsgName,
"Id": nsgId,
"VirtualCloudNetworkId": vcnId,
"CreatedAt": createdAt,
"LifecycleState": lifecycleState
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF V1.1 PR.AC-3",
"NIST SP 800-53 Rev. 4 AC-1",
"NIST SP 800-53 Rev. 4 AC-17",
"NIST SP 800-53 Rev. 4 AC-19",
"NIST SP 800-53 Rev. 4 AC-20",
"NIST SP 800-53 Rev. 4 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
]
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
@registry.register_check("oci.vcn.nsg")
def oci_vcn_security_master_auditor_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):
""""[OCI.NetworkSecurityGroup.{checkIdNumber}] Virtual Cloud Network Network Security Groups should not allow unrestricted {protocol} access"""
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
# Open the configuration file and parse the information within to dynamically populate this auditor
with open(configFile, 'r') as jsonfile:
auditRules = json.load(jsonfile)
# Grab Sec Lists from Cache
for nsg in get_oci_network_security_groups(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):
# B64 encode all of the details for the Asset
assetJson = json.dumps(nsg, default=str).encode("utf-8")
assetB64 = base64.b64encode(assetJson)
compartmentId = nsg["compartment_id"]
nsgName = nsg["display_name"]
nsgId = nsg["id"]
vcnId = nsg["vcn_id"]
lifecycleState = nsg["lifecycle_state"]
createdAt = nsg["time_created"]
for x in auditRules:
toPortTarget = x["ToPort"]
fromPortTarget = x["FromPort"]
targetProtocol = x["Protocol"]
checkTitle = x["CheckTitle"]
checkId = x["CheckId"]
checkDescription = x["CheckDescriptor"]
if targetProtocol == "17":
portFilterDict = "udp_options"
elif targetProtocol == "6":
portFilterDict = "tcp_options"
else:
continue
# Create a list comprehension to scope down the amount of rules that need to be looked at
# as the list comprehension runs all of the conditional logic, there are times where the
# rules are DESTINATION PORT and not source port, so we cannot skip them but we'll need
# to bypass any AttributeError because of them
try:
filteredRules = [
rule for rule in nsg["network_security_group_security_rules"]
if rule.get("protocol") == targetProtocol
and rule.get("source") == "0.0.0.0/0"
and rule[portFilterDict]["destination_port_range"].get("max") == toPortTarget
and rule[portFilterDict]["destination_port_range"].get("min") == fromPortTarget
]
except AttributeError:
continue
except TypeError:
continue
# If the "filteredRules" list has at least one entry it means there is a rule that allows access to everyone (on CIDRs) for a specific rule
# that means there will always be a "counter" finding for it so the changes can be monitored over time
if filteredRules:
finding = {
"SchemaVersion": "2018-10-08",
"Id": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/{checkId}",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/{checkId}",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": checkTitle,
"Description": f"Virtual Cloud Network Network Security Groups {nsgName} for VCN {vcnId} in Compartment {compartmentId} in {ociRegionName} contains a rule that allows unrestricted {checkDescription} access. Network security groups (NSGs) act as a virtual firewall for your compute instances and other kinds of resources. An NSG consists of a set of ingress and egress security rules that apply only to a set of VNICs of your choice in a single VCN (for example: all the compute instances that act as web servers in the web tier of a multi-tier application in your VCN). NSG security rules function the same as security list rules. However, for an NSG security rule's source (for ingress rules) or destination (for egress rules), you can specify an NSG instead of a CIDR. This means you can easily write security rules to control traffic between two NSGs in the same VCN, or traffic within a single NSG. See Parts of a Security Rule. Allowing unfettered access removes an important part of a cloud security defense-in-depth and makes it easier for adversaries to perform recon on your assets and potentially gain unauthorized access where no other network-based controls exist. Your security list should still be audited to ensure any other rules are compliant with organizational or regulatory requirements. Additionally, audit that other network security controls such as Security Lists, Web Application Firewalls, Network Firewalls, and other host- and network-based appliances and services are configured to mitigate the risk posed by this Security List. Refer to the remediation instructions if this configuration is not intended.",
"Remediation": {
"Recommendation": {
"Text": "For more information on building and modifying Security Rules (for NSGs and SLs) refer to the Security Rules section of the Oracle Cloud Infrastructure Documentation for Networks.",
"Url": "https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/securityrules.htm#sec_rules_parts",
}
},
"ProductFields": {
"ProductName": "ElectricEye",
"Provider": "OCI",
"ProviderType": "CSP",
"ProviderAccountId": ociTenancyId,
"AssetRegion": ociRegionName,
"AssetDetails": assetB64,
"AssetClass": "Networking",
"AssetService": "Oracle Cloud Virtual Cloud Network",
"AssetComponent": "Network Security Group"
},
"Resources": [
{
"Type": "OciVcnSecurityList",
"Id": nsgId,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"TenancyId": ociTenancyId,
"CompartmentId": compartmentId,
"Region": ociRegionName,
"Name": nsgName,
"Id": nsgId,
"VirtualCloudNetworkId": vcnId,
"CreatedAt": createdAt,
"LifecycleState": lifecycleState
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF V1.1 PR.AC-3",
"NIST SP 800-53 Rev. 4 AC-1",
"NIST SP 800-53 Rev. 4 AC-17",
"NIST SP 800-53 Rev. 4 AC-19",
"NIST SP 800-53 Rev. 4 AC-20",
"NIST SP 800-53 Rev. 4 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
]
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/{checkId}",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": f"{ociTenancyId}/{ociRegionName}/{compartmentId}/{nsgId}/{checkId}",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": checkTitle,
"Description": f"Virtual Cloud Network Network Security Groups {nsgName} for VCN {vcnId} in Compartment {compartmentId} in {ociRegionName} does not contain a rule that allows unrestricted {checkDescription} access. Your security list should still be audited to ensure any other rules are compliant with organizational or regulatory requirements.",
"Remediation": {
"Recommendation": {
"Text": "For more information on building and modifying Security Rules (for NSGs and SLs) refer to the Security Rules section of the Oracle Cloud Infrastructure Documentation for Networks.",
"Url": "https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/securityrules.htm#sec_rules_parts",
}
},
"ProductFields": {
"ProductName": "ElectricEye",
"Provider": "OCI",
"ProviderType": "CSP",
"ProviderAccountId": ociTenancyId,
"AssetRegion": ociRegionName,
"AssetDetails": assetB64,
"AssetClass": "Networking",
"AssetService": "Oracle Cloud Virtual Cloud Network",
"AssetComponent": "Network Security Group"
},
"Resources": [
{
"Type": "OciVcnSecurityList",
"Id": nsgId,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"TenancyId": ociTenancyId,
"CompartmentId": compartmentId,
"Region": ociRegionName,
"Name": nsgName,
"Id": nsgId,
"VirtualCloudNetworkId": vcnId,
"CreatedAt": createdAt,
"LifecycleState": lifecycleState
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF V1.1 PR.AC-3",
"NIST SP 800-53 Rev. 4 AC-1",
"NIST SP 800-53 Rev. 4 AC-17",
"NIST SP 800-53 Rev. 4 AC-19",
"NIST SP 800-53 Rev. 4 AC-20",
"NIST SP 800-53 Rev. 4 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
]
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
## END ??
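# For reference, a single hypothetical entry from electriceye_oci_vcn_nsg_auditor_config.json,
# using the keys the master auditor reads above (ToPort, FromPort, Protocol, CheckTitle,
# CheckId, CheckDescriptor). The values shown are illustrative assumptions, not the real config:
#
# [
#     {
#         "ToPort": 22,
#         "FromPort": 22,
#         "Protocol": "6",
#         "CheckTitle": "[OCI.NetworkSecurityGroup.2] Virtual Cloud Network Network Security Groups should not allow unrestricted SSH access",
#         "CheckId": "oci-vcn-nsgs-ssh-open-check",
#         "CheckDescriptor": "SSH (TCP port 22)"
#     }
# ]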

blob_id: 4042223b58a6e71a60858c7bc07a2aeaf7e5e2f0 | directory_id: 8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a | content_id: fc819fa7ed303480c9102451d3b0009826f0ff59
path: /test/document_stores/test_search_engine.py | filename: test_search_engine.py | extension: py | length_bytes: 11,055
repo_name: deepset-ai/haystack | branch_name: refs/heads/main | snapshot_id: caa5287051d1771395ea624b58097000825bad81 | revision_id: 5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-01T02:41:23.490526 | revision_date: 2023-08-31T15:33:12 | committer_date: 2023-08-31T15:33:12
github_id: 221,654,678 | star_events_count: 10,599 | fork_events_count: 1,558
gha_event_created_at: 2023-09-14T17:09:42 | gha_created_at: 2019-11-14T09:05:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from typing import Optional
from unittest.mock import MagicMock
import numpy as np
import pytest
from haystack.document_stores.search_engine import SearchEngineDocumentStore
from haystack.schema import FilterType
@pytest.mark.unit
def test_prepare_hosts():
pass
@pytest.mark.document_store
class SearchEngineDocumentStoreTestAbstract:
"""
This is the base class for any SearchEngine document store test suite; it doesn't have the `Test` prefix in the name
because we want to run its methods only in subclasses.
"""
@pytest.fixture
def mocked_get_all_documents_in_index(self, monkeypatch):
method_mock = MagicMock(return_value=None)
monkeypatch.setattr(SearchEngineDocumentStore, "_get_all_documents_in_index", method_mock)
return method_mock
# Constants
query = "test"
@pytest.mark.integration
def test___do_bulk(self):
pass
@pytest.mark.integration
def test___do_scan(self):
pass
@pytest.mark.integration
def test_query_by_embedding(self):
pass
@pytest.mark.integration
def test_get_meta_values_by_key(self, ds, documents):
ds.write_documents(documents)
# test without filters or query
result = ds.get_metadata_values_by_key(key="name")
assert result == [
{"count": 3, "value": "name_0"},
{"count": 3, "value": "name_1"},
{"count": 3, "value": "name_2"},
]
# test with filters but no query
result = ds.get_metadata_values_by_key(key="year", filters={"month": ["01"]})
assert result == [{"count": 3, "value": "2020"}]
# test with filters & query
result = ds.get_metadata_values_by_key(key="year", query="Bar")
assert result == [{"count": 3, "value": "2021"}]
@pytest.mark.unit
def test_query_return_embedding_true(self, mocked_document_store):
mocked_document_store.return_embedding = True
mocked_document_store.query(self.query)
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
assert "_source" not in kwargs
@pytest.mark.unit
def test_query_return_embedding_false(self, mocked_document_store):
mocked_document_store.return_embedding = False
mocked_document_store.query(self.query)
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
assert kwargs["_source"] == {"excludes": ["embedding"]}
@pytest.mark.unit
def test_query_excluded_meta_data_return_embedding_true(self, mocked_document_store):
mocked_document_store.return_embedding = True
mocked_document_store.excluded_meta_data = ["foo", "embedding"]
mocked_document_store.query(self.query)
_, kwargs = mocked_document_store.client.search.call_args
# we expect "embedding" was removed from the final query
assert kwargs["_source"] == {"excludes": ["foo"]}
@pytest.mark.unit
def test_query_excluded_meta_data_return_embedding_false(self, mocked_document_store):
mocked_document_store.return_embedding = False
mocked_document_store.excluded_meta_data = ["foo"]
mocked_document_store.query(self.query)
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
assert kwargs["_source"] == {"excludes": ["foo", "embedding"]}
@pytest.mark.unit
def test_get_all_documents_return_embedding_true(self, mocked_document_store):
mocked_document_store.return_embedding = False
mocked_document_store.client.search.return_value = {}
mocked_document_store.get_all_documents(return_embedding=True)
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
assert "_source" not in kwargs
@pytest.mark.unit
def test_get_all_documents_return_embedding_false(self, mocked_document_store):
mocked_document_store.return_embedding = True
mocked_document_store.client.search.return_value = {}
mocked_document_store.get_all_documents(return_embedding=False)
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
# starting with elasticsearch client 7.16, scan() uses the query parameter instead of body,
# see https://github.com/elastic/elasticsearch-py/commit/889edc9ad6d728b79fadf790238b79f36449d2e2
body = kwargs.get("body", kwargs)
assert body["_source"] == {"excludes": ["embedding"]}
@pytest.mark.unit
def test_get_all_documents_excluded_meta_data_has_no_influence(self, mocked_document_store):
mocked_document_store.excluded_meta_data = ["foo"]
mocked_document_store.client.search.return_value = {}
mocked_document_store.get_all_documents(return_embedding=False)
# assert the resulting body is not affected by the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
# starting with elasticsearch client 7.16, scan() uses the query parameter instead of body,
# see https://github.com/elastic/elasticsearch-py/commit/889edc9ad6d728b79fadf790238b79f36449d2e2
body = kwargs.get("body", kwargs)
assert body["_source"] == {"excludes": ["embedding"]}
@pytest.mark.unit
def test_get_document_by_id_return_embedding_true(self, mocked_document_store):
mocked_document_store.return_embedding = True
mocked_document_store.get_document_by_id("123")
# assert the resulting body is consistent with the `excluded_meta_data` value
_, kwargs = mocked_document_store.client.search.call_args
assert "_source" not in kwargs
@pytest.mark.unit
def test_get_all_labels_legacy_document_id(self, mocked_document_store, mocked_get_all_documents_in_index):
mocked_get_all_documents_in_index.return_value = [
{
"_id": "123",
"_source": {
"query": "Who made the PDF specification?",
"document": {
"content": "Some content",
"content_type": "text",
"score": None,
"id": "fc18c987a8312e72a47fb1524f230bb0",
"meta": {},
"embedding": [0.1, 0.2, 0.3],
},
"answer": {
"answer": "Adobe Systems",
"type": "extractive",
"context": "Some content",
"offsets_in_context": [{"start": 60, "end": 73}],
"offsets_in_document": [{"start": 60, "end": 73}],
# legacy document_id answer
"document_id": "fc18c987a8312e72a47fb1524f230bb0",
"meta": {},
"score": None,
},
"is_correct_answer": True,
"is_correct_document": True,
"origin": "user-feedback",
"pipeline_id": "some-123",
},
}
]
labels = mocked_document_store.get_all_labels()
assert labels[0].answer.document_ids == ["fc18c987a8312e72a47fb1524f230bb0"]
@pytest.mark.unit
def test_query_batch_req_for_each_batch(self, mocked_document_store):
mocked_document_store.batch_size = 2
mocked_document_store.query_batch([self.query] * 3)
assert mocked_document_store.client.msearch.call_count == 2
@pytest.mark.unit
def test_query_by_embedding_batch_req_for_each_batch(self, mocked_document_store):
mocked_document_store.batch_size = 2
mocked_document_store.query_by_embedding_batch([np.array([1, 2, 3])] * 3)
assert mocked_document_store.client.msearch.call_count == 2
@pytest.mark.integration
def test_document_with_version_metadata(self, ds: SearchEngineDocumentStore):
ds.write_documents([{"content": "test", "meta": {"version": "2023.1"}}])
documents = ds.get_all_documents()
assert documents[0].meta["version"] == "2023.1"
@pytest.mark.integration
def test_label_with_version_metadata(self, ds: SearchEngineDocumentStore):
ds.write_labels(
[
{
"query": "test",
"document": {"content": "test"},
"is_correct_answer": True,
"is_correct_document": True,
"origin": "gold-label",
"meta": {"version": "2023.1"},
"answer": None,
}
]
)
labels = ds.get_all_labels()
assert labels[0].meta["version"] == "2023.1"
@pytest.mark.integration
@pytest.mark.parametrize(
"query,filters,result_count",
[
# test happy path
("tost", {"year": ["2020", "2021", "1990"]}, 4),
# test empty filters
("test", None, 5),
# test linefeeds in query
("test\n", {"year": "2021"}, 3),
# test double quote in query
('test"', {"year": "2021"}, 3),
# test non-matching query
("toast", None, 0),
],
)
def test_custom_query(
self, query: str, filters: Optional[FilterType], result_count: int, ds: SearchEngineDocumentStore
):
documents = [
{"id": "1", "content": "test", "meta": {"year": "2019"}},
{"id": "2", "content": "test", "meta": {"year": "2020"}},
{"id": "3", "content": "test", "meta": {"year": "2021"}},
{"id": "4", "content": "test", "meta": {"year": "2021"}},
{"id": "5", "content": "test", "meta": {"year": "2021"}},
]
ds.write_documents(documents)
custom_query = """
{
"query": {
"bool": {
"must": [{
"multi_match": {
"query": ${query},
"fields": ["content"],
"fuzziness": "AUTO"
}
}],
"filter": ${filters}
}
}
}
"""
results = ds.query(query=query, filters=filters, custom_query=custom_query)
assert len(results) == result_count
@pytest.mark.document_store
class TestSearchEngineDocumentStore:
"""
This class tests the concrete methods in SearchEngineDocumentStore
"""
@pytest.mark.integration
def test__split_document_list(self):
pass
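# Note on fixtures (not defined in this file): the unit tests above rely on a
# `mocked_document_store` fixture exposing a MagicMock `client` (so that
# `client.search.call_args` and `client.msearch.call_count` can be inspected), and the
# integration tests rely on `ds` and `documents` fixtures backed by a real search engine.
# Concrete subclasses are expected to provide these fixtures.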

blob_id: a7df642777ec3be2b0394007215c750f4c5627d0 | directory_id: 063ec40f1bf8c156f84bf6dec5060e8f4427904b | content_id: 704ec4463b6ba5137814c2f6804e60a22c25ac68
path: /src/monosat/api/python/monosat/solver.py | filename: solver.py | extension: py | length_bytes: 4,411
repo_name: sambayless/monosat | branch_name: refs/heads/master | snapshot_id: 10a6a8d8901529c070aa9cf11b3aea35d0e7f6db | revision_id: cf9ab9c17de3b175059fbe38626021d010d65b0a
detected_licenses: ["GPL-2.0-only", "GPL-1.0-or-later", "MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-05-11T07:43:17.302876 | revision_date: 2023-04-30T17:50:07 | committer_date: 2023-04-30T17:50:07
github_id: 29,748,191 | star_events_count: 104 | fork_events_count: 42
gha_event_created_at: 2023-04-30T17:50:08 | gha_created_at: 2015-01-23T19:05:46 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import time
from monosat.bvtheory import BitVector
from monosat.logic import *
from monosat.monosat_c import Monosat
from monosat.pbtheory import PBManager
def FoundOptimal():
return Monosat().lastSolutionWasOptimal()
class SolveException(Exception):
pass
def Solve(
*assumptions,
preprocessing=True,
bvs_to_minimize=None,
time_limit_seconds=None,
conflict_limit=None
):
WriteConstraints()
if time_limit_seconds is None or time_limit_seconds <= 0:
time_limit_seconds = -1
if conflict_limit is None or conflict_limit <= 0:
conflict_limit = -1
Monosat().setTimeLimit(time_limit_seconds)
Monosat().setConflictLimit(conflict_limit)
# if preprocessing:
# Monosat().preprocess();
# print("Solving in Monosat...")
t = time.process_time()
if isinstance(assumptions, Var):
assumptions = [assumptions]
elif assumptions is None:
assumptions = []
elif len(assumptions) == 1 and not isinstance(assumptions[0], Var):
try:
# unwrap a single collection of assumption Vars passed as one positional argument
assumptions = list(assumptions[0])
except TypeError:
pass
if isinstance(bvs_to_minimize, BitVector):
bvs_to_minimize = [bvs_to_minimize]
elif bvs_to_minimize is None:
bvs_to_minimize = []
for bv in bvs_to_minimize:
bvID = bv.getID()
Monosat().minimizeBV(bvID)
r = Monosat().solveLimited([x.getLit() for x in assumptions])
if r is None:
raise SolveException(
"MonoSAT aborted before solving (possibly do to a time or memory limit)"
)
Monosat().elapsed_time += time.process_time() - t
found_optimal = Monosat().lastSolutionWasOptimal()
if r is None:
raise SolveException(
"MonoSAT aborted before solving (possibly due to a time or memory limit)"
)
elif r and not found_optimal:
print(
"MonoSAT found a satisfying solution, but it might not be optimal (due to a time or memory limit)"
)
return r
# If the most recent solve() call was UNSAT, returns a (negated) subset of the assumptions, at least one of which must be
# true in any satisfying solution.
# If minimize is true, the solver will expend effort trying to find a locally minimal subset.
def getConflictClause(minimize=False):
if minimize:
Monosat().minimizeConflictClause()
conf_clause = Monosat().getConflictClause()
if conf_clause is None:
return None
else:
vars = []
for v in conf_clause:
vars.append(Var(v))
return vars
def minimizeUnsatCore(assumptions):
assumption_ints = [a.lit for a in assumptions]
conf_clause = Monosat().minimizeUnsatCore(assumption_ints)
assert conf_clause is not None
vars = []
for v in conf_clause:
vars.append(Var(v))
return vars
# optimization support
def clearOptimizationObjectives():
Monosat().clearOptimizationObjectives()
def maximize(bitvector_or_literals, weights=None):
if isinstance(bitvector_or_literals, Var):
bitvector_or_literals = [bitvector_or_literals]
if isinstance(weights, int):
weights = [weights]
if isinstance(bitvector_or_literals, BitVector):
Monosat().maximizeBV(bitvector_or_literals.getID())
else:
lit_ints = [l.getLit() for l in bitvector_or_literals]
if weights is None:
Monosat().maximizeLits(lit_ints)
else:
Monosat().maximizeWeightedLits(lit_ints, weights)
def minimize(bitvector_or_literals, weights=None):
if isinstance(bitvector_or_literals, Var):
bitvector_or_literals = [bitvector_or_literals]
if isinstance(weights, int):
weights = [weights]
if isinstance(bitvector_or_literals, BitVector):
Monosat().minimizeBV(bitvector_or_literals.getID())
else:
lit_ints = [l.getLit() for l in bitvector_or_literals]
if weights is None:
Monosat().minimizeLits(lit_ints)
else:
Monosat().minimizeWeightedLits(lit_ints, weights)
def WriteConstraints():
_writePBConstraints()
def _writePBConstraints():
# write any pseudoboolean constraints
if not PBManager().hasConstraints():
return
t = time.process_time()
pbmgr = PBManager()
pbmgr.flush()
d = time.process_time()
PBManager().elapsed_time += d - t
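# Minimal usage sketch (illustrative; Var, Or, and Assert come from monosat.logic, which is
# star-imported above). Kept as a comment so importing this module has no side effects:
#
# a, b = Var(), Var()
# Assert(Or(a, b))
# if Solve(a, time_limit_seconds=10):
#     print("SAT under assumption a")
# else:
#     print("UNSAT; conflicting assumptions:", getConflictClause(minimize=True))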

blob_id: 18d083b07ddbc6d4ba07be14f883b8f441681a21 | directory_id: f3806d9fb54773908cd9704121a543b114470aca | content_id: e29d4a196b0220de5146fafa899ea62d610bc8fd
path: /angr/procedures/definitions/win32_webauthn.py | filename: win32_webauthn.py | extension: py | length_bytes: 13,821
repo_name: angr/angr | branch_name: refs/heads/master | snapshot_id: 8ae95fceca51b0a001de56477d984dd01193ac1d | revision_id: 37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | gha_license_id: BSD-2-Clause
visit_date: 2023-08-17T03:15:21.007865 | revision_date: 2023-08-15T18:44:57 | committer_date: 2023-08-15T18:44:57
github_id: 40,328,394 | star_events_count: 7,184 | fork_events_count: 1,306
gha_event_created_at: 2023-09-14T20:14:23 | gha_created_at: 2015-08-06T21:46:55 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("webauthn.dll")
prototypes = \
{
#
'WebAuthNGetApiVersionNumber': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")),
#
'WebAuthNIsUserVerifyingPlatformAuthenticatorAvailable': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pbIsUserVerifyingPlatformAuthenticatorAvailable"]),
#
'WebAuthNAuthenticatorMakeCredential': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "pwszId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszIcon": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_RP_ENTITY_INFORMATION", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszIcon": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszDisplayName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_USER_ENTITY_INFORMATION", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"cCredentialParameters": SimTypeInt(signed=False, label="UInt32"), "pCredentialParameters": SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0), "lAlg": SimTypeInt(signed=True, label="Int32")}, name="WEBAUTHN_COSE_CREDENTIAL_PARAMETER", pack=False, align=None), offset=0)}, name="WEBAUTHN_COSE_CREDENTIAL_PARAMETERS", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbClientDataJSON": SimTypeInt(signed=False, label="UInt32"), "pbClientDataJSON": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszHashAlgId": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CLIENT_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwTimeoutMilliseconds": SimTypeInt(signed=False, label="UInt32"), "CredentialList": SimStruct({"cCredentials": SimTypeInt(signed=False, label="UInt32"), "pCredentials": SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CREDENTIAL", pack=False, align=None), offset=0)}, name="WEBAUTHN_CREDENTIALS", pack=False, align=None), "Extensions": SimStruct({"cExtensions": SimTypeInt(signed=False, label="UInt32"), "pExtensions": SimTypePointer(SimStruct({"pwszExtensionIdentifier": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbExtension": SimTypeInt(signed=False, label="UInt32"), "pvExtension": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="WEBAUTHN_EXTENSION", pack=False, align=None), offset=0)}, name="WEBAUTHN_EXTENSIONS", pack=False, align=None), "dwAuthenticatorAttachment": SimTypeInt(signed=False, label="UInt32"), "bRequireResidentKey": SimTypeInt(signed=True, label="Int32"), "dwUserVerificationRequirement": SimTypeInt(signed=False, label="UInt32"), "dwAttestationConveyancePreference": SimTypeInt(signed=False, label="UInt32"), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "pCancellationId": SimTypePointer(SimTypeBottom(label="Guid"), offset=0), "pExcludeCredentialList": SimTypePointer(SimStruct({"cCredentials": SimTypeInt(signed=False, label="UInt32"), "ppCredentials": SimTypePointer(SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, 
label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dwTransports": SimTypeInt(signed=False, label="UInt32")}, name="WEBAUTHN_CREDENTIAL_EX", pack=False, align=None), offset=0), offset=0)}, name="WEBAUTHN_CREDENTIAL_LIST", pack=False, align=None), offset=0)}, name="WEBAUTHN_AUTHENTICATOR_MAKE_CREDENTIAL_OPTIONS", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "pwszFormatType": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbAuthenticatorData": SimTypeInt(signed=False, label="UInt32"), "pbAuthenticatorData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbAttestation": SimTypeInt(signed=False, label="UInt32"), "pbAttestation": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwAttestationDecodeType": SimTypeInt(signed=False, label="UInt32"), "pvAttestationDecode": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "cbAttestationObject": SimTypeInt(signed=False, label="UInt32"), "pbAttestationObject": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbCredentialId": SimTypeInt(signed=False, label="UInt32"), "pbCredentialId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Extensions": SimStruct({"cExtensions": SimTypeInt(signed=False, label="UInt32"), "pExtensions": SimTypePointer(SimStruct({"pwszExtensionIdentifier": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbExtension": SimTypeInt(signed=False, label="UInt32"), "pvExtension": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="WEBAUTHN_EXTENSION", pack=False, align=None), offset=0)}, name="WEBAUTHN_EXTENSIONS", pack=False, align=None), "dwUsedTransport": SimTypeInt(signed=False, label="UInt32")}, name="WEBAUTHN_CREDENTIAL_ATTESTATION", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hWnd", "pRpInformation", "pUserInformation", "pPubKeyCredParams", "pWebAuthNClientData", "pWebAuthNMakeCredentialOptions", "ppWebAuthNCredentialAttestation"]),
#
'WebAuthNAuthenticatorGetAssertion': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbClientDataJSON": SimTypeInt(signed=False, label="UInt32"), "pbClientDataJSON": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszHashAlgId": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CLIENT_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwTimeoutMilliseconds": SimTypeInt(signed=False, label="UInt32"), "CredentialList": SimStruct({"cCredentials": SimTypeInt(signed=False, label="UInt32"), "pCredentials": SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CREDENTIAL", pack=False, align=None), offset=0)}, name="WEBAUTHN_CREDENTIALS", pack=False, align=None), "Extensions": SimStruct({"cExtensions": SimTypeInt(signed=False, label="UInt32"), "pExtensions": SimTypePointer(SimStruct({"pwszExtensionIdentifier": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbExtension": SimTypeInt(signed=False, label="UInt32"), "pvExtension": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="WEBAUTHN_EXTENSION", pack=False, align=None), offset=0)}, name="WEBAUTHN_EXTENSIONS", pack=False, align=None), "dwAuthenticatorAttachment": SimTypeInt(signed=False, label="UInt32"), "dwUserVerificationRequirement": SimTypeInt(signed=False, label="UInt32"), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "pwszU2fAppId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pbU2fAppId": SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0), "pCancellationId": SimTypePointer(SimTypeBottom(label="Guid"), offset=0), "pAllowCredentialList": SimTypePointer(SimStruct({"cCredentials": SimTypeInt(signed=False, label="UInt32"), "ppCredentials": SimTypePointer(SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dwTransports": SimTypeInt(signed=False, label="UInt32")}, name="WEBAUTHN_CREDENTIAL_EX", pack=False, align=None), offset=0), offset=0)}, name="WEBAUTHN_CREDENTIAL_LIST", pack=False, align=None), offset=0)}, name="WEBAUTHN_AUTHENTICATOR_GET_ASSERTION_OPTIONS", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbAuthenticatorData": SimTypeInt(signed=False, label="UInt32"), "pbAuthenticatorData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbSignature": SimTypeInt(signed=False, label="UInt32"), "pbSignature": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Credential": SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CREDENTIAL", pack=False, align=None), "cbUserId": SimTypeInt(signed=False, label="UInt32"), "pbUserId": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, 
name="WEBAUTHN_ASSERTION", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hWnd", "pwszRpId", "pWebAuthNClientData", "pWebAuthNGetAssertionOptions", "ppWebAuthNAssertion"]),
#
'WebAuthNFreeCredentialAttestation': SimTypeFunction([SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "pwszFormatType": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbAuthenticatorData": SimTypeInt(signed=False, label="UInt32"), "pbAuthenticatorData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbAttestation": SimTypeInt(signed=False, label="UInt32"), "pbAttestation": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwAttestationDecodeType": SimTypeInt(signed=False, label="UInt32"), "pvAttestationDecode": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "cbAttestationObject": SimTypeInt(signed=False, label="UInt32"), "pbAttestationObject": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbCredentialId": SimTypeInt(signed=False, label="UInt32"), "pbCredentialId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Extensions": SimStruct({"cExtensions": SimTypeInt(signed=False, label="UInt32"), "pExtensions": SimTypePointer(SimStruct({"pwszExtensionIdentifier": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbExtension": SimTypeInt(signed=False, label="UInt32"), "pvExtension": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="WEBAUTHN_EXTENSION", pack=False, align=None), offset=0)}, name="WEBAUTHN_EXTENSIONS", pack=False, align=None), "dwUsedTransport": SimTypeInt(signed=False, label="UInt32")}, name="WEBAUTHN_CREDENTIAL_ATTESTATION", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pWebAuthNCredentialAttestation"]),
#
'WebAuthNFreeAssertion': SimTypeFunction([SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbAuthenticatorData": SimTypeInt(signed=False, label="UInt32"), "pbAuthenticatorData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbSignature": SimTypeInt(signed=False, label="UInt32"), "pbSignature": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Credential": SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "pbId": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "pwszCredentialType": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WEBAUTHN_CREDENTIAL", pack=False, align=None), "cbUserId": SimTypeInt(signed=False, label="UInt32"), "pbUserId": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WEBAUTHN_ASSERTION", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pWebAuthNAssertion"]),
#
'WebAuthNGetCancellationId': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pCancellationId"]),
#
'WebAuthNCancelCurrentOperation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pCancellationId"]),
#
'WebAuthNGetErrorName': SimTypeFunction([SimTypeInt(signed=True, label="Int32")], SimTypePointer(SimTypeChar(label="Char"), offset=0), arg_names=["hr"]),
#
'WebAuthNGetW3CExceptionDOMError': SimTypeFunction([SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hr"]),
}
lib.set_prototypes(prototypes)
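# Reading the table above: each value is a SimTypeFunction built from nested
# SimStruct/SimTypePointer declarations, so an individual prototype can be
# pulled straight from the dict. For example, the 'WebAuthNGetCancellationId'
# entry declares a single out-parameter (a pointer to a GUID named
# pCancellationId) and a signed Int32 return value:
#   prototypes['WebAuthNGetCancellationId']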
|
e4467a950c1e41580679dcd97c81c017c10f5edd
|
dab10c721000fd9eb38676d6b2730f155eedd54e
|
/recirq/toric_code/toric_code_plaquettes.py
|
02d99e431f705c82e3b4ce7cc55dc6fa999ef27a
|
[
"Apache-2.0"
] |
permissive
|
quantumlib/ReCirq
|
f45e55e432f2e29fb8f2fe35a3d436a629219e86
|
d021621a3837693ae9c5fdc5c05058de20fba314
|
refs/heads/master
| 2023-09-03T19:35:55.281836
| 2023-09-01T01:12:40
| 2023-09-01T01:12:40
| 246,951,354
| 260
| 116
|
Apache-2.0
| 2023-09-09T00:41:35
| 2020-03-12T23:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,319
|
py
|
toric_code_plaquettes.py
|
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Tuple
import pandas as pd
from . import toric_code_rectangle as tcr
class ToricCodePlaquettes:
"""X and Z plaquette (stabilizer) expectation values."""
def __init__(
self,
code: tcr.ToricCodeRectangle,
x_plaquettes: Dict[Tuple[int, int], float],
z_plaquettes: Dict[Tuple[int, int], float],
):
"""
Args:
code: Toric code rectangle whose plaquette expectation values we store
x_plaquettes: Mapping from X plaquette index (row, col) to expectation value.
We expect rows in range(code.rows) and cols in range(code.cols).
z_plaquettes: Mapping from Z plaquette index (row, col) to expectation value.
We expect rows in range(code.rows + 1) and cols in range(code.cols + 1).
"""
self.code = code
self.x_plaquettes = x_plaquettes
self.z_plaquettes = z_plaquettes
def __repr__(self) -> str:
return (
f"ToricCodePlaquettes(code={self.code}, "
f"x_plaquettes={self.x_plaquettes}, z_plaquettes={self.z_plaquettes})"
)
@classmethod
def for_uniform_parity(
cls, code: tcr.ToricCodeRectangle, x_value: float, z_value: float
) -> "ToricCodePlaquettes":
"""Create plaquettes with uniform X and Z expectation values."""
x_plaquettes = {(row, col): x_value for row, col in code.x_plaquette_indices()}
z_plaquettes = {(row, col): z_value for row, col in code.z_plaquette_indices()}
return cls(code, x_plaquettes, z_plaquettes)
@classmethod
def from_global_measurements(
cls, code: tcr.ToricCodeRectangle, x_data: pd.DataFrame, z_data: pd.DataFrame
) -> "ToricCodePlaquettes":
"""Compute stabilizer expectation values from global measurement data.
Args:
code: Toric code rectangle whose plaquette expectation values we store
x_data: Result of global measurements in the X basis. DataFrame with a single column of
integer values, one for each measurement outcome, as in cirq.Result.data.
z_data: Result of global measurements in the Z basis, similar to x_data.
Returns:
ToricCodePlaquettes with expectation values calculated from data
"""
x_plaquettes: Dict[Tuple[int, int], float] = {
(row, col): cls.expectation_value(code, x_data, row, col, x_basis=True)
for row, col in code.x_plaquette_indices()
}
z_plaquettes: Dict[Tuple[int, int], float] = {
(row, col): cls.expectation_value(code, z_data, row, col, x_basis=False)
for row, col in code.z_plaquette_indices()
}
return cls(code, x_plaquettes, z_plaquettes)
@classmethod
def expectation_value(
cls,
code: tcr.ToricCodeRectangle,
data: pd.DataFrame,
row: int,
col: int,
x_basis: bool,
) -> float:
"""Compute an expectation value for a single X or Z plaquette.
Args:
code: Toric code rectangle with this plaquette
data: DataFrame with a single column of integer values, one for each measurement
outcome, as in cirq.Result.data
row: Plaquette row
col: Plaquette column
x_basis: If True, look at the X plaquette at (row, col); otherwise, look at the Z
plaquette at (row, col)
Returns:
Expectation value of the plaquette, between -1 and 1
"""
if x_basis:
qubit_idxs = code.x_plaquette_to_qubit_idxs(row, col)
else:
qubit_idxs = code.z_plaquette_to_qubit_idxs(row, col)
total_qubits = len(code.qubits)
parities = data.applymap(
lambda value: cls.compute_parity(value, qubit_idxs, total_qubits)
)
return float(parities.mean())
@staticmethod
def compute_parity(value: int, qubit_idxs: Iterable[int], total_qubits: int) -> int:
"""Compute the parity of a set of qubits for a given measurement.
Args:
value: Big-endian packed integer of qubit measurement outcomes
qubit_idxs: Select the measurement outcomes for qubits at these indices
total_qubits: Total number of qubits. This is needed to know how many bits to use when
expanding value in binary; it determines where idx=0 is.
Returns:
+1 for even parity (even number of 1s), -1 for odd parity (odd number of 1s)
"""
bitstring = f"{value:0{total_qubits}b}"
number_of_ones = sum(int(bitstring[idx]) for idx in qubit_idxs)
return (-1) ** number_of_ones
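# Small worked example of compute_parity above: with total_qubits=4 the
# measurement value 0b0110 expands to the bitstring "0110", so qubit indices
# 1 and 2 hold two ones (even parity) while indices 0 and 1 hold a single one
# (odd parity).
assert ToricCodePlaquettes.compute_parity(0b0110, qubit_idxs=[1, 2], total_qubits=4) == 1
assert ToricCodePlaquettes.compute_parity(0b0110, qubit_idxs=[0, 1], total_qubits=4) == -1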
|
c1f06b5804fd770c09c2ad62d67d9972b88a3734
|
450916eee7580beb928ed8f387db4f0a8c1aa508
|
/src/amuse/test/suite/compile_tests/test_python_implementation_mpi.py
|
b1a9c31a2d11e20b0b13792a3943d7fedf0ab289
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amusecode/amuse
|
42095545893f5a86ea79c2a52ce54d3ce8eb204f
|
b57c1e2fda1457d5025307be105c2aa59b19b574
|
refs/heads/main
| 2023-08-31T04:50:48.880044
| 2023-08-30T12:00:20
| 2023-08-30T12:00:20
| 18,516,331
| 158
| 118
|
Apache-2.0
| 2023-08-30T12:00:22
| 2014-04-07T12:35:07
|
AMPL
|
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
test_python_implementation_mpi.py
|
from amuse.support.interface import InCodeComponentImplementation
from amuse.test.amusetest import TestWithMPI
from amuse import datamodel
from amuse.rfi.core import PythonCodeInterface, legacy_function, LegacyFunctionSpecification
from amuse.support import exceptions
import numpy
from mpi4py import MPI
class ForTestingInterface(PythonCodeInterface):
def __init__(self, **options):
PythonCodeInterface.__init__(self, implementation_factory=ForTestingImplementation, **options)
@legacy_function
def get_range():
function = LegacyFunctionSpecification()
function.addParameter('imin', dtype='int32', direction=function.OUT)
function.addParameter('imax', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_x():
function = LegacyFunctionSpecification()
function.addParameter('index', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def get_dens():
function = LegacyFunctionSpecification()
function.addParameter('index', dtype='int32', direction=function.IN)
function.addParameter('dens', dtype='float64', direction=function.OUT)
function.addParameter('N', dtype='int32', direction=function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
class ForTestingImplementation(object):
def __init__(self):
self.comm = MPI.COMM_WORLD
self.myrank = self.comm.Get_rank()
self.N = self.comm.Get_size()
self.Ngrid = 3*4*5
n = self.Ngrid//self.N
x = (numpy.arange(n)+self.myrank*n)/(1.*self.Ngrid)
self.local_imin = self.myrank*n
self.local_imax = (self.myrank+1)*n-1
self.dens = x**2
def get_range(self, imin, imax):
imin.value = 0
imax.value = self.Ngrid-1
return 0
def get_x(self, index, x):
x.value = index/(1.*self.Ngrid)
return 0
def get_dens(self, index, dens, N):
a = (index >= self.local_imin)*(index <= self.local_imax)
_dens = numpy.zeros(N)
_dens[a] = self.dens[index[a]-self.local_imin]
dens.value = numpy.zeros(N)
_dens = self.comm.Reduce(_dens, dens.value, MPI.SUM, root=0)
return 0
class ForTesting(InCodeComponentImplementation):
def __init__(self, **options):
InCodeComponentImplementation.__init__(self, ForTestingInterface(**options), **options)
def define_grids(self, object):
object.define_grid('grid', axes_names=['x'], grid_class=datamodel.CartesianGrid)
object.set_grid_range('grid', 'get_range')
object.add_getter('grid', 'get_dens', names=('dens',))
object.add_getter('grid', 'get_x', names=('x',))
class TestInterface(TestWithMPI):
def ForTesting(self, **options):
options["worker_dir"] = self.get_path_to_results()
return ForTesting(**options)
def ForTestingInterface(self, **options):
options["worker_dir"] = self.get_path_to_results()
return ForTestingInterface(**options)
def test1(self):
interface = self.ForTesting(redirection="none", number_of_workers=1)
x = interface.grid.x
dens = interface.grid.dens
self.assertEqual(x, numpy.arange(60)/60.)
self.assertEqual(dens, x**2)
interface.stop()
def test2(self):
for n in [3, 5, 6]:
interface = self.ForTesting(redirection="none", number_of_workers=n)
x = interface.grid.x
dens = interface.grid.dens
self.assertEqual(x, numpy.arange(60)/60.)
self.assertEqual(dens, x**2)
interface.stop()
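# Standalone sketch of the index masking used in get_dens above: the boolean
# product keeps only the grid indices owned by this rank, so each worker fills
# its slice of the density array before comm.Reduce sums the pieces on rank 0.
# (The values below are illustrative.)
_index = numpy.array([0, 5, 9, 14])
_local_imin, _local_imax = 5, 9
_mask = (_index >= _local_imin) * (_index <= _local_imax)
# _mask -> array([False,  True,  True, False])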
|
6884bcc6e824548b807bcecc5d35d7b48843696e
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-vcm/huaweicloudsdkvcm/v2/__init__.py
|
98ce978fb47f34d75f14d40dd89c9f6738d1e856
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,937
|
py
|
__init__.py
|
# coding: utf-8
from __future__ import absolute_import
from huaweicloudsdkvcm.v2.vcm_client import VcmClient
from huaweicloudsdkvcm.v2.vcm_async_client import VcmAsyncClient
from huaweicloudsdkvcm.v2.model.audio_input_data import AudioInputData
from huaweicloudsdkvcm.v2.model.audio_output import AudioOutput
from huaweicloudsdkvcm.v2.model.audio_output_hosting import AudioOutputHosting
from huaweicloudsdkvcm.v2.model.audio_output_hosting_obs import AudioOutputHostingObs
from huaweicloudsdkvcm.v2.model.audio_output_obs import AudioOutputObs
from huaweicloudsdkvcm.v2.model.audio_response_output import AudioResponseOutput
from huaweicloudsdkvcm.v2.model.audio_response_output_hosting import AudioResponseOutputHosting
from huaweicloudsdkvcm.v2.model.audio_service_config import AudioServiceConfig
from huaweicloudsdkvcm.v2.model.audio_service_config_common import AudioServiceConfigCommon
from huaweicloudsdkvcm.v2.model.check_audio_job_request import CheckAudioJobRequest
from huaweicloudsdkvcm.v2.model.check_audio_job_response import CheckAudioJobResponse
from huaweicloudsdkvcm.v2.model.check_audio_job_response_body import CheckAudioJobResponseBody
from huaweicloudsdkvcm.v2.model.check_audio_job_response_body_input import CheckAudioJobResponseBodyInput
from huaweicloudsdkvcm.v2.model.check_video_job_request import CheckVideoJobRequest
from huaweicloudsdkvcm.v2.model.check_video_job_response import CheckVideoJobResponse
from huaweicloudsdkvcm.v2.model.create_audio_job_request import CreateAudioJobRequest
from huaweicloudsdkvcm.v2.model.create_audio_job_request_body import CreateAudioJobRequestBody
from huaweicloudsdkvcm.v2.model.create_audio_job_request_body_input import CreateAudioJobRequestBodyInput
from huaweicloudsdkvcm.v2.model.create_audio_job_response import CreateAudioJobResponse
from huaweicloudsdkvcm.v2.model.create_audio_job_response_body import CreateAudioJobResponseBody
from huaweicloudsdkvcm.v2.model.create_video_job_req import CreateVideoJobReq
from huaweicloudsdkvcm.v2.model.create_video_job_req_input import CreateVideoJobReqInput
from huaweicloudsdkvcm.v2.model.create_video_job_request import CreateVideoJobRequest
from huaweicloudsdkvcm.v2.model.create_video_job_response import CreateVideoJobResponse
from huaweicloudsdkvcm.v2.model.create_video_job_response_body import CreateVideoJobResponseBody
from huaweicloudsdkvcm.v2.model.delete_demo_info_request import DeleteDemoInfoRequest
from huaweicloudsdkvcm.v2.model.delete_demo_info_response import DeleteDemoInfoResponse
from huaweicloudsdkvcm.v2.model.delete_video_job_request import DeleteVideoJobRequest
from huaweicloudsdkvcm.v2.model.delete_video_job_response import DeleteVideoJobResponse
from huaweicloudsdkvcm.v2.model.input_data import InputData
from huaweicloudsdkvcm.v2.model.input_vcn import InputVcn
from huaweicloudsdkvcm.v2.model.list_audio_jobs_request import ListAudioJobsRequest
from huaweicloudsdkvcm.v2.model.list_audio_jobs_response import ListAudioJobsResponse
from huaweicloudsdkvcm.v2.model.list_video_jobs_request import ListVideoJobsRequest
from huaweicloudsdkvcm.v2.model.list_video_jobs_response import ListVideoJobsResponse
from huaweicloudsdkvcm.v2.model.output import Output
from huaweicloudsdkvcm.v2.model.output_dis import OutputDis
from huaweicloudsdkvcm.v2.model.output_hosting import OutputHosting
from huaweicloudsdkvcm.v2.model.output_hosting_obs import OutputHostingObs
from huaweicloudsdkvcm.v2.model.output_localpath import OutputLocalpath
from huaweicloudsdkvcm.v2.model.output_obs import OutputObs
from huaweicloudsdkvcm.v2.model.output_webhook import OutputWebhook
from huaweicloudsdkvcm.v2.model.service_config import ServiceConfig
from huaweicloudsdkvcm.v2.model.service_config_common import ServiceConfigCommon
from huaweicloudsdkvcm.v2.model.video_job_response import VideoJobResponse
from huaweicloudsdkvcm.v2.model.video_job_response_hosting_result import VideoJobResponseHostingResult
|
5e9379f976907dd64fc7a1d850d0220d24f81286
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/etl/extractor/sql.py
|
f3d169f211823e83d2dd5e92800901fd2d520d75
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
sql.py
|
# ----------------------------------------------------------------------
# SQL Data Extractor
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from .base import BaseExtractor
class SQLExtractor(BaseExtractor):
SQL = "SELECT 1"
def get_sql(self):
"""
Yields tuples of (SQL query, list of bind parameters), one per configured query
"""
if isinstance(self.SQL, list):
for sql in self.SQL:
yield sql, []
else:
yield self.SQL, []
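# Hedged sketch of how SQL can be overridden; constructor arguments come from
# BaseExtractor (not shown here), so only the class body is illustrated. With
# a list, get_sql() above yields one (query, params) tuple per statement.
class TwoQuerySQLExtractor(SQLExtractor):
    SQL = [
        "SELECT id, name FROM host",
        "SELECT id, name FROM service",
    ]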
|
5503ee9cf47c8f05efa6ace2ac58b7c3b7f76e5a
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/data_analytics/esaccessors.py
|
36d869df8cf6ef6f0702da103b28bcf78e17d2ec
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
esaccessors.py
|
import datetime
from dimagi.utils.dates import add_months
from corehq.apps.data_analytics.const import DEFAULT_EXPERIENCED_THRESHOLD
from corehq.apps.es.aggregations import (
AggregationTerm,
NestedTermAggregationsHelper,
TermsAggregation,
)
from corehq.apps.es.forms import FormES
from corehq.apps.es.sms import SMSES
from corehq.apps.es.users import UserES
def get_app_submission_breakdown_es(domain_name, monthspan, user_ids=None):
# takes > 1 m to load at 50k worker scale
terms = [
AggregationTerm('app_id', 'app_id'),
AggregationTerm('device_id', 'form.meta.deviceID'),
AggregationTerm('user_id', 'form.meta.userID'),
]
query = FormES(for_export=True).domain(domain_name).submitted(
gte=monthspan.startdate,
lt=monthspan.computed_enddate,
)
if user_ids is not None:
query = query.user_id(user_ids)
return NestedTermAggregationsHelper(base_query=query, terms=terms).get_data()
def get_domain_device_breakdown_es(domain_name, monthspan):
query = FormES(for_export=True).domain(domain_name).submitted(
gte=monthspan.startdate,
lt=monthspan.computed_enddate,
).aggregation(TermsAggregation('device_id', 'form.meta.deviceID')).size(0)
return query.run().aggregations.device_id.counts_by_bucket()
def get_mobile_users(domains):
return set(
UserES()
.show_inactive()
.mobile_users()
.domain(domains)
.scroll_ids()
)
def active_mobile_users(domain, start, end, *args):
"""
Returns the number of mobile users who have submitted a form or SMS in the
time specified
"""
user_ids = get_mobile_users(domain.name)
form_users = (FormES(for_export=True)
.domain(domain.name)
.user_aggregation()
.submitted(gte=start, lt=end)
.user_id(user_ids)
.size(0)
.run()
.aggregations.user.counts_by_bucket())
sms_users = set(
SMSES(for_export=True)
.incoming_messages()
.user_aggregation()
.to_commcare_user()
.domain(domain.name)
.received(gte=start, lt=end)
.size(0)
.run()
.aggregations.user.keys
)
return set(user_ids), form_users, sms_users
def get_forms_for_users(domain, user_ids, start, end):
query = (
FormES(for_export=True)
.domain(domain)
.submitted(gte=start, lte=end)
.user_id(user_ids)
.source(['form.meta.userID', 'form.case', 'form.@xmlns'])
)
return query.scroll()
def get_possibly_experienced(domain, start):
user_ids = get_mobile_users(domain.name)
threshold = domain.internal.experienced_threshold or DEFAULT_EXPERIENCED_THRESHOLD
months = threshold - 2
threshold_month = add_months(start.startdate.year, start.startdate.month, -months)
end_month = datetime.date(day=1, year=threshold_month[0], month=threshold_month[1])
form_users = set(
FormES(for_export=True)
.domain(domain.name)
.user_aggregation()
.submitted(lt=end_month)
.user_id(user_ids)
.size(0)
.run()
.aggregations.user.keys
)
return set(form_users)
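# Worked sketch of the cutoff arithmetic in get_possibly_experienced above,
# assuming add_months(year, month, offset) returns a (year, month) tuple as
# the tuple indexing suggests. With an experienced_threshold of 3, months = 1,
# so a monthspan starting in April 2020 gives:
#   add_months(2020, 4, -1)   # -> (2020, 3)
#   end_month = datetime.date(day=1, year=2020, month=3)
# i.e. only forms submitted strictly before 2020-03-01 are counted.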
|
8af10c9f678dfeecb4228c7dbf646d8eb4318d68
|
7a6b4705293709e32a6927ad4f76eb0549f3bea9
|
/orchestra/todos/auth.py
|
eca84f87fe10450556961968b225f1ad5c8faa66
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
b12io/orchestra
|
a71941d80d1eeddb73f301da8f601b2c31a4b279
|
ee8a29122a3491feae1e1c2c4699142726ae6c21
|
refs/heads/main
| 2023-08-20T17:46:36.360755
| 2023-06-27T13:32:46
| 2023-06-27T13:32:46
| 42,593,972
| 459
| 66
|
Apache-2.0
| 2023-06-27T13:32:48
| 2015-09-16T14:55:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
auth.py
|
from rest_framework import permissions
from orchestra.models import Worker
from orchestra.models import Todo
from orchestra.models import TodoQA
class IsAssociatedWithTodosProject(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the todo's project.
"""
def has_object_permission(self, request, view, obj):
worker = Worker.objects.get(user=request.user)
if isinstance(obj, Todo):
project = obj.project
elif isinstance(obj, TodoQA):
project = obj.todo.project
else:
project = None
return (
project and
(worker.is_project_admin() or
worker.assignments.filter(task__project=project).exists()))
class IsAssociatedWithProject(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`project`.
"""
def has_permission(self, request, view):
"""
project_id is passed in the request payload when the request is a POST,
PUT or PATCH. It can also be passed via query param, not only in a GET
request but also in the requests listed above (when applying a filter).
"""
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
todo_id = request.data.get('todo')
if todo_id is None:
todo_id = view.kwargs.get('pk')
project_id = request.data.get(
'project') or request.data.get('project__id')
if project_id is None:
project_id = request.query_params.get(
'project') or request.query_params.get('project__id')
if project_id is None and todo_id is not None:
project_id = Todo.objects.get(id=todo_id).project.id
return worker.assignments.filter(task__project__id=project_id).exists()
class IsAssociatedWithTask(permissions.BasePermission):
"""
Ensures that a user's worker is associated with the request's
`task`.
"""
def has_permission(self, request, view):
worker = Worker.objects.get(user=request.user)
if worker.is_project_admin():
return True
if request.method == 'GET':
task_id = request.query_params.get('task')
return worker.assignments.filter(task=task_id).exists()
return False
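# Hedged usage sketch (the viewset below is hypothetical, not part of
# Orchestra): DRF applies these classes via `permission_classes`, calling
# has_permission on every request and has_object_permission on detail routes.
#
#   class TodoViewSet(viewsets.ModelViewSet):          # hypothetical
#       permission_classes = (IsAssociatedWithTodosProject,)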
|
6477ba6e4525ac5d5e7993b0d632fc27b8faad06
|
3c0b87aae2ecca8245ddf7f26d0934bde23c7d8b
|
/pixyz/losses/mmd.py
|
b6a6c2c9ceac03745e45c24f3d476e83943ecb56
|
[
"MIT"
] |
permissive
|
masa-su/pixyz
|
a36e43c620d691b05bbcff202d44570468c10cab
|
a9baf067730035d03351476f5c2e1e43016808ce
|
refs/heads/main
| 2023-08-21T12:27:35.075909
| 2021-12-14T14:15:36
| 2021-12-14T14:15:36
| 135,975,723
| 483
| 51
|
MIT
| 2023-07-30T03:54:54
| 2018-06-04T05:41:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
mmd.py
|
import torch
import sympy
from .losses import Divergence
from ..utils import get_dict_values
class MMD(Divergence):
r"""
The Maximum Mean Discrepancy (MMD).
.. math::
D_{MMD^2}[p||q] = \mathbb{E}_{p(x), p(x')}[k(x, x')] + \mathbb{E}_{q(x), q(x')}[k(x, x')]
- 2\mathbb{E}_{p(x), q(x')}[k(x, x')]
where :math:`k(x, x')` is any positive definite kernel.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="q")
>>> loss_cls = MMD(p, q, kernel="gaussian")
>>> print(loss_cls)
D_{MMD^2} \left[p(z|x)||q(z|x) \right]
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
>>> # Use the inverse (multi-)quadric kernel
>>> loss = MMD(p, q, kernel="inv-multiquadratic").eval({"x": torch.randn(10, 64)})
"""
def __init__(self, p, q, kernel="gaussian", **kernel_params):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
if len(p.var) != 1:
raise ValueError("A given distribution must have only one variable.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
if kernel == "gaussian":
self.kernel = gaussian_rbf_kernel
elif kernel == "inv-multiquadratic":
self.kernel = inverse_multiquadratic_rbf_kernel
else:
raise NotImplementedError()
self.kernel_params = kernel_params
@property
def _symbol(self):
return sympy.Symbol("D_{{MMD^2}} \\left[{}||{} \\right]".format(self.p.prob_text, self.q.prob_text))
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var[0])[0].shape[0]
def forward(self, x_dict={}, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample from distributions
p_x = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.p.var)[0]
q_x = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.q.var)[0]
if p_x.shape != q_x.shape:
raise ValueError("The two distribution variables must have the same shape.")
if len(p_x.shape) != 2:
raise ValueError("The number of axes of a given sample must be 2, got %d" % len(p_x.shape))
p_x_dim = p_x.shape[1]
q_x_dim = q_x.shape[1]
# estimate the squared MMD (unbiased estimator)
p_kernel = self.kernel(p_x, p_x, **self.kernel_params).sum() / (p_x_dim * (p_x_dim - 1))
q_kernel = self.kernel(q_x, q_x, **self.kernel_params).sum() / (q_x_dim * (q_x_dim - 1))
pq_kernel = self.kernel(p_x, q_x, **self.kernel_params).sum() / (p_x_dim * q_x_dim)
mmd_loss = p_kernel + q_kernel - 2 * pq_kernel
return mmd_loss, {}
def pairwise_distance_matrix(x, y, metric="euclidean"):
r"""
Computes the pairwise distance matrix between x and y.
"""
if metric == "euclidean":
return torch.sum((x[:, None, :] - y[None, :, :]) ** 2, dim=-1)
raise NotImplementedError()
def gaussian_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Gaussian radial basis function (RBF) kernel.
.. math::
k(x, y) = \exp \left( -\frac{||x-y||^2}{\sigma^2} \right)
"""
return torch.exp(-pairwise_distance_matrix(x, y) / (1. * sigma_sqr))
def inverse_multiquadratic_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Inverse multi-quadratic radial basis function (RBF) kernel.
.. math::
k(x, y) = \frac{\sigma^2}{||x-y||^2 + \sigma^2}
"""
return sigma_sqr / (pairwise_distance_matrix(x, y) + sigma_sqr)
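# Tiny numeric check of the kernel helpers above (only needs torch, imported
# at the top of this module): all-zero and all-one points in 3 dimensions are
# squared distance 3 apart, so with sigma_sqr=2 the Gaussian kernel value is
# exp(-1.5) and the inverse multiquadratic value is 2 / 5.
_x = torch.zeros(2, 3)
_y = torch.ones(4, 3)
assert pairwise_distance_matrix(_x, _y).shape == (2, 4)
# gaussian_rbf_kernel(_x, _y, sigma_sqr=2.)               -> every entry exp(-1.5)
# inverse_multiquadratic_rbf_kernel(_x, _y, sigma_sqr=2.) -> every entry 0.4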
|
a650c2769ffc8984e69abfa2e812a6ade007365f
|
d237f2925bb0bf47a3bac9bbac153c8fc9ef3b73
|
/tools/odrivetool
|
e63a06662021f3d946eeee3867626cbf05b869f9
|
[
"MIT"
] |
permissive
|
odriverobotics/ODrive
|
eb9ee34bbca34b93cc6c93c64f21203ed2392631
|
58fdd3fdfdcaeff76547870f5e7acdf1673479e8
|
refs/heads/master
| 2023-08-28T00:35:46.214421
| 2023-05-09T10:18:21
| 2023-05-09T10:18:21
| 57,061,619
| 1,444
| 754
|
MIT
| 2023-05-19T04:35:41
| 2016-04-25T17:26:04
|
C++
|
UTF-8
|
Python
| false
| false
| 9,493
|
odrivetool
|
#!/usr/bin/env python3
"""
ODrive command line utility
"""
from __future__ import print_function
import sys
# We require Python 3.5 for the "async def" syntax.
if sys.version_info <= (3, 5):
print("Your Python version (Python {}.{}) is too old. Please install Python 3.5 or newer.".format(
sys.version_info.major, sys.version_info.minor
))
exit(1)
import sys
import os
import argparse
import time
import math
import odrive
from odrive.utils import OperationAbortedException
from odrive.configuration import *
from fibre import Logger, Event
# Flush stdout by default
# Source:
# https://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
old_print = print
def print(*args, **kwargs):
kwargs.pop('flush', False)
old_print(*args, **kwargs)
file = kwargs.get('file', sys.stdout)
file.flush() if file is not None else sys.stdout.flush()
script_path=os.path.dirname(os.path.realpath(__file__))
## Parse arguments ##
parser = argparse.ArgumentParser(description='ODrive command line utility\n'
'Running this tool without any arguments is equivalent to running `odrivetool shell`\n',
formatter_class=argparse.RawTextHelpFormatter)
# Subcommands
subparsers = parser.add_subparsers(help='sub-command help', dest='command')
shell_parser = subparsers.add_parser('shell', help='Drop into an interactive python shell that lets you interact with the ODrive(s)')
shell_parser.add_argument("--no-ipython", action="store_true",
help="Use the regular Python shell "
"instead of the IPython shell, "
"even if IPython is installed.")
dfu_parser = subparsers.add_parser('dfu', help="Upgrade the ODrive device firmware. "
"If no serial number is specified, the first ODrive that is found is updated")
dfu_parser.add_argument('file', metavar='HEX', nargs='?',
help='The .hex file to be flashed. Make sure target board version '
'of the firmware file matches the actual board version. '
'You can download the latest release manually from '
'https://github.com/madcowswe/ODrive/releases. '
'If no file is provided, the script automatically downloads '
'the latest firmware.')
unlock_parser = subparsers.add_parser('unlock', help="Try to remove read-out protection. "
"If no serial number is specified, the first ODrive that is found is unlocked")
dfu_parser = subparsers.add_parser('backup-config', help="Saves the configuration of the ODrive to a JSON file")
dfu_parser.add_argument('file', nargs='?',
help="Path to the file where to store the data. "
"If no path is provided, the configuration is stored in {}.".format(tempfile.gettempdir()))
dfu_parser = subparsers.add_parser('restore-config', help="Restores the configuration of the ODrive from a JSON file")
dfu_parser.add_argument('file', nargs='?',
help="Path to the file that contains the configuration data. "
"If no path is provided, the configuration is loaded from {}.".format(tempfile.gettempdir()))
subparsers.add_parser('liveplotter', help="For plotting of odrive parameters (i.e. position) in real time")
subparsers.add_parser('drv-status', help="Show status of the on-board DRV8301 chips (for debugging only)")
subparsers.add_parser('rate-test', help="Estimate the average transmission bandwidth over USB")
subparsers.add_parser('udev-setup', help="Linux only: Gives users on your system permission to access the ODrive by installing udev rules")
# General arguments
parser.add_argument("-p", "--path", metavar="PATH", action="store",
help="The path(s) where ODrive(s) should be discovered.\n"
"By default the script will connect to any ODrive on USB.\n\n"
"To select a specific USB device:\n"
" --path usb:BUS:DEVICE\n"
"usbwhere BUS and DEVICE are the bus and device numbers as shown in `lsusb`.\n\n"
"To select a specific serial port:\n"
" --path serial:PATH\n"
"where PATH is the path of the serial port. For example \"/dev/ttyUSB0\".\n"
"You can use `ls /dev/tty*` to find the correct port.\n\n"
"You can combine USB and serial specs by separating them with a comma (no space!)\n"
"Example:\n"
" --path usb,serial:/dev/ttyUSB0\n"
"means \"discover any USB device or a serial device on /dev/ttyUSB0\"")
parser.add_argument("-s", "--serial-number", action="store",
help="The 12-digit serial number of the device. "
"This is a string consisting of 12 upper case hexadecimal "
"digits as displayed in lsusb. \n"
" example: 385F324D3037\n"
"You can list all devices connected to USB by running\n"
"(lsusb -d 1209:0d32 -v; lsusb -d 0483:df11 -v) | grep iSerial\n"
"If omitted, any device is accepted.")
parser.add_argument("-v", "--verbose", action="store_true",
help="print debug information")
parser.add_argument("--version", action="store_true",
help="print version information and exit")
parser.set_defaults(path="usb:idVendor=0x1209,idProduct=0x0D32,bInterfaceClass=0,bInterfaceSubClass=1,bInterfaceProtocol=0")
args = parser.parse_args()
# Default command
if args.command is None:
args.command = 'shell'
args.no_ipython = False
logger = Logger(verbose=args.verbose)
def print_version():
sys.stderr.write("ODrive control utility v" + odrive.__version__ + "\n")
sys.stderr.flush()
app_shutdown_token = Event()
try:
if args.version == True:
print_version()
elif args.command == 'shell':
print_version()
if ".dev" in odrive.__version__:
print("")
logger.warn("Developer Preview")
print(" If you find issues, please report them")
print(" on https://github.com/madcowswe/ODrive/issues")
print(" or better yet, submit a pull request to fix it.")
print("")
import odrive.shell
odrive.shell.launch_shell(args, logger)
elif args.command == 'dfu':
print_version()
import platform
if platform.system() == "Windows":
logger.warn("DFU is unreliable on Windows. If it fails, please use "
"the DFU switch to force DFU mode. You can also try the "
"developer preview of odrivetool with `python -m pip install odrive --upgrade --pre`.\n"
"Also see https://docs.odriverobotics.com/odrivetool#upgrading-firmware-with-a-different-dfu-tool for other options.")
import odrive.dfu
odrive.dfu.launch_dfu(args, logger, app_shutdown_token)
elif args.command == 'unlock':
print_version()
import odrive.dfu
odrive.dfu.unlock_device(args.serial_number, app_shutdown_token)
elif args.command == 'liveplotter':
from odrive.utils import start_liveplotter
print("Waiting for ODrive...")
my_odrive = odrive.find_any(path=args.path, serial_number=args.serial_number)
# If you want to plot different values, change them here.
# You can plot any number of values concurrently.
cancellation_token = start_liveplotter(lambda: [
my_odrive.axis0.encoder.pos_estimate,
my_odrive.axis1.encoder.pos_estimate,
])
print("Showing plot. Press Ctrl+C to exit.")
while not cancellation_token.is_set():
time.sleep(1)
elif args.command == 'drv-status':
from odrive.utils import print_drv_regs
print("Waiting for ODrive...")
my_odrive = odrive.find_any(path=args.path, serial_number=args.serial_number)
print_drv_regs("Motor 0", my_odrive.axis0.motor)
print_drv_regs("Motor 1", my_odrive.axis1.motor)
elif args.command == 'rate-test':
from odrive.utils import rate_test
print("Waiting for ODrive...")
my_odrive = odrive.find_any(path=args.path, serial_number=args.serial_number)
rate_test(my_odrive)
elif args.command == 'udev-setup':
from odrive.version import setup_udev_rules
setup_udev_rules(logger)
elif args.command == 'backup-config':
from odrive.configuration import backup_config
print("Waiting for ODrive...")
my_odrive = odrive.find_any(path=args.path, serial_number=args.serial_number)
backup_config(my_odrive, args.file, logger)
elif args.command == 'restore-config':
from odrive.configuration import restore_config
print("Waiting for ODrive...")
my_odrive = odrive.find_any(path=args.path, serial_number=args.serial_number)
restore_config(my_odrive, args.file, logger)
else:
raise Exception("unknown command: " + args.command)
except OperationAbortedException:
logger.info("Operation aborted.")
finally:
app_shutdown_token.set()
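# Example invocations of the subcommands defined above (the file names and the
# serial number are illustrative only):
#   odrivetool                                   # same as `odrivetool shell`
#   odrivetool dfu ODriveFirmware.hex            # flash a specific hex file
#   odrivetool backup-config my_odrive.json
#   odrivetool restore-config my_odrive.json
#   odrivetool liveplotter --serial-number 385F324D3037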
|
|
424a1f19ea1dd7a26dca3989fb15dcb3cad3daaa
|
cc91a1c30f6c5a176b225bdbf96c7bfd6dfeca52
|
/scripts/ctsbuild/build.py
|
053f550c517da596a87fdd23741829fe50878c85
|
[
"Apache-2.0"
] |
permissive
|
KhronosGroup/VK-GL-CTS
|
d07b2611dc3a258b5be4ff87c1fe55623f32bac1
|
a745d5c29c5609cb7805f59444520a7700cf39d1
|
refs/heads/main
| 2023-08-31T19:34:48.447202
| 2023-08-25T13:17:54
| 2023-08-25T13:17:54
| 50,466,830
| 454
| 267
|
Apache-2.0
| 2023-09-02T20:49:14
| 2016-01-26T23:29:28
|
C++
|
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
build.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import shutil
from . common import *
from . config import *
def initBuildDir (config, generator):
cfgArgs = []
# Build base configuration args
cfgArgs += config.getArgs()
# Generator args
cfgArgs += generator.getGenerateArgs(config.getBuildType())
if not os.path.exists(config.buildDir):
os.makedirs(config.buildDir)
pushWorkingDir(config.getBuildDir())
try:
execute([config.getCMakePath(), config.getSrcPath()] + cfgArgs)
finally:
popWorkingDir()
def build (config, generator, targets = None):
if os.path.exists(config.buildDir):
try:
initBuildDir(config, generator)
except Exception as e:
print(e)
print("WARNING: Using existing build directory failed; re-creating build directory")
shutil.rmtree(config.buildDir)
initBuildDir(config, generator)
else:
initBuildDir(config, generator)
baseCmd = [config.getCMakePath(), '--build', '.']
buildArgs = generator.getBuildArgs(config.getBuildType())
pushWorkingDir(config.getBuildDir())
if targets == None:
execute(baseCmd + buildArgs)
else:
for target in targets:
execute(baseCmd + ['--target', target] + buildArgs)
popWorkingDir()
|
035f9e79a604f11563de2c6d3b8cd57502ac44e7
|
d8b3cbb7644c95996e4ac2e66ae8ba82e620bbb0
|
/dumpall/thirdparty/__init__.py
|
61fd0918e89d12222c34a08f93bf6e637a023a42
|
[
"MIT"
] |
permissive
|
0xHJK/dumpall
|
38d5d692901350c22be2c3d65e88bc239cdad435
|
a9e27a0ce484c624ae1186b047172641c90ef7f8
|
refs/heads/master
| 2023-01-11T00:38:44.342038
| 2022-07-05T05:30:40
| 2022-07-05T05:30:40
| 216,212,738
| 1,075
| 133
|
MIT
| 2022-12-27T15:37:11
| 2019-10-19T13:42:55
|
Python
|
UTF-8
|
Python
| false
| false
| 100
|
py
|
__init__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: HJK
@file: __init__
@time: 2019-10-25
"""
|
a9b627a8b4bd88af6575bfa903a4e4e90866e012
|
39b42c4c2f55e58e96040b214280d732b1310121
|
/tests/searchcommands/test_generator_command.py
|
63ae3ac8350d3ed47ea34d475433b76ecee9a893
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
splunk/splunk-sdk-python
|
bcba843519b145ba45af659b88b7d33ef8fb00df
|
be5268d8859c5a90cca8341ed3f1a29e07a4ff0f
|
refs/heads/master
| 2023-08-23T10:38:20.422713
| 2023-07-20T09:02:53
| 2023-07-20T09:02:53
| 1,504,670
| 621
| 435
|
Apache-2.0
| 2023-09-11T00:33:32
| 2011-03-20T23:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,982
|
py
|
test_generator_command.py
|
import io
import time
from . import chunked_data_stream as chunky
from splunklib.searchcommands import Configuration, GeneratingCommand
def test_simple_generator():
@Configuration()
class GeneratorTest(GeneratingCommand):
def generate(self):
for num in range(1, 10):
yield {'_time': time.time(), 'event_index': num}
generator = GeneratorTest()
in_stream = io.BytesIO()
in_stream.write(chunky.build_getinfo_chunk())
in_stream.write(chunky.build_chunk({'action': 'execute'}))
in_stream.seek(0)
out_stream = io.BytesIO()
generator._process_protocol_v2([], in_stream, out_stream)
out_stream.seek(0)
ds = chunky.ChunkedDataStream(out_stream)
is_first_chunk = True
finished_seen = False
expected = set(map(lambda i: str(i), range(1, 10)))
seen = set()
for chunk in ds:
if is_first_chunk:
assert chunk.meta["generating"] is True
assert chunk.meta["type"] == "stateful"
is_first_chunk = False
finished_seen = chunk.meta.get("finished", False)
for row in chunk.data:
seen.add(row["event_index"])
print(out_stream.getvalue())
print(expected)
print(seen)
assert expected.issubset(seen)
assert finished_seen
def test_allow_empty_input_for_generating_command():
"""
Passing allow_empty_input=False to a generating command raises an error
"""
@Configuration()
class GeneratorTest(GeneratingCommand):
def generate(self):
for num in range(1, 3):
yield {"_index": num}
generator = GeneratorTest()
in_stream = io.BytesIO()
out_stream = io.BytesIO()
try:
generator.process([], in_stream, out_stream, allow_empty_input=False)
except ValueError as error:
assert str(error) == "allow_empty_input cannot be False for Generating Commands"
def test_all_fieldnames_present_for_generated_records():
@Configuration()
class GeneratorTest(GeneratingCommand):
def generate(self):
yield self.gen_record(_time=time.time(), one=1)
yield self.gen_record(_time=time.time(), two=2)
yield self.gen_record(_time=time.time(), three=3)
yield self.gen_record(_time=time.time(), four=4)
yield self.gen_record(_time=time.time(), five=5)
generator = GeneratorTest()
in_stream = io.BytesIO()
in_stream.write(chunky.build_getinfo_chunk())
in_stream.write(chunky.build_chunk({'action': 'execute'}))
in_stream.seek(0)
out_stream = io.BytesIO()
generator._process_protocol_v2([], in_stream, out_stream)
out_stream.seek(0)
ds = chunky.ChunkedDataStream(out_stream)
fieldnames_expected = {'_time', 'one', 'two', 'three', 'four', 'five'}
fieldnames_actual = set()
for chunk in ds:
for row in chunk.data:
fieldnames_actual |= set(row.keys())
assert fieldnames_expected.issubset(fieldnames_actual)
|
d4674e4f0c09fdde2d705aa45cf69ab35f57b407
|
5a1ea4ff45c3b4da28e32711e8cfcadfd3086ad8
|
/src/contextualise/image.py
|
f635d47e5d323a4ef33f22fe2003824543be20f5
|
[
"MIT"
] |
permissive
|
brettkromkamp/contextualise
|
ff857749315617d7b8e863e82144909bbffc0640
|
99c7d10e64eed080e7c08e758406222b4db08d36
|
refs/heads/master
| 2023-08-23T14:02:46.994523
| 2023-08-16T20:00:42
| 2023-08-16T20:00:42
| 182,712,987
| 1,081
| 50
|
MIT
| 2020-05-11T20:15:39
| 2019-04-22T09:12:27
|
Python
|
UTF-8
|
Python
| false
| false
| 12,027
|
py
|
image.py
|
"""
image.py file. Part of the Contextualise project.
February 13, 2022
Brett Alistair Kromkamp (brettkromkamp@gmail.com)
"""
import os
import uuid
import maya
from flask import Blueprint, current_app, flash, redirect, render_template, request, session, url_for
from flask_login import current_user
from flask_security import login_required
from topicdb.models.attribute import Attribute
from topicdb.models.collaborationmode import CollaborationMode
from topicdb.models.datatype import DataType
from topicdb.models.occurrence import Occurrence
from topicdb.store.retrievalmode import RetrievalMode
from werkzeug.exceptions import abort
from .topic_store import get_topic_store
bp = Blueprint("image", __name__)
RESOURCES_DIRECTORY = "resources"
EXTENSIONS_WHITELIST = {"png", "jpg", "jpeg", "gif"}
@bp.route("/images/<map_identifier>/<topic_identifier>")
@login_required
def index(map_identifier, topic_identifier):
store = get_topic_store()
topic_map = store.get_map(map_identifier, current_user.id)
if topic_map is None:
abort(404)
# If the map doesn't belong to the user and they don't have the right
# collaboration mode on the map, then abort
if not topic_map.owner and topic_map.collaboration_mode is not CollaborationMode.EDIT:
abort(403)
topic = store.get_topic(
map_identifier,
topic_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
if topic is None:
abort(404)
image_occurrences = store.get_topic_occurrences(
map_identifier,
topic_identifier,
"image",
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
images = []
for image_occurrence in image_occurrences:
images.append(
{
"identifier": image_occurrence.identifier,
"title": image_occurrence.get_attribute_by_name("title").value,
"scope": image_occurrence.scope,
"url": image_occurrence.resource_ref,
}
)
creation_date_attribute = topic.get_attribute_by_name("creation-timestamp")
creation_date = maya.parse(creation_date_attribute.value) if creation_date_attribute else "Undefined"
map_notes_count = store.get_topic_occurrences_statistics(map_identifier, "notes")["note"]
return render_template(
"image/index.html",
topic_map=topic_map,
topic=topic,
images=images,
creation_date=creation_date,
map_notes_count=map_notes_count,
)
@bp.route("/images/upload/<map_identifier>/<topic_identifier>", methods=("GET", "POST"))
@login_required
def upload(map_identifier, topic_identifier):
store = get_topic_store()
topic_map = store.get_map(map_identifier, current_user.id)
if topic_map is None:
abort(404)
# If the map doesn't belong to the user and they don't have the right
# collaboration mode on the map, then abort
if not topic_map.owner and topic_map.collaboration_mode is not CollaborationMode.EDIT:
abort(403)
topic = store.get_topic(
map_identifier,
topic_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
if topic is None:
abort(404)
map_notes_count = store.get_topic_occurrences_statistics(map_identifier, "notes")["note"]
error = 0
if request.method == "POST":
form_image_title = request.form["image-title"].strip()
form_image_scope = request.form["image-scope"].strip()
form_upload_file = request.files["image-file"] if "image-file" in request.files else None
# If no values have been provided set their default values
if not form_image_scope:
form_image_scope = session["current_scope"]
# Validate form inputs
if not form_image_title:
error = error | 1
if not form_upload_file:
error = error | 2
else:
if form_upload_file.filename == "":
error = error | 4
elif not allowed_file(form_upload_file.filename):
error = error | 8
if not store.topic_exists(topic_map.identifier, form_image_scope):
error = error | 16
if error != 0:
flash(
"An error occurred when uploading the image. Please review the warnings and fix accordingly.",
"warning",
)
else:
image_file_name = f"{str(uuid.uuid4())}.{get_file_extension(form_upload_file.filename)}"
# Create the image directory for this topic map if it doesn't already exist
image_directory = os.path.join(current_app.static_folder, RESOURCES_DIRECTORY, str(map_identifier))
if not os.path.isdir(image_directory):
os.makedirs(image_directory)
file_path = os.path.join(image_directory, image_file_name)
form_upload_file.save(file_path)
image_occurrence = Occurrence(
instance_of="image",
topic_identifier=topic.identifier,
scope=form_image_scope,
resource_ref=image_file_name,
)
title_attribute = Attribute(
"title",
form_image_title,
image_occurrence.identifier,
data_type=DataType.STRING,
)
# Persist objects to the topic store
store.create_occurrence(topic_map.identifier, image_occurrence)
store.create_attribute(topic_map.identifier, title_attribute)
flash("Image successfully uploaded.", "success")
return redirect(
url_for(
"image.index",
map_identifier=topic_map.identifier,
topic_identifier=topic.identifier,
)
)
return render_template(
"image/upload.html",
error=error,
topic_map=topic_map,
topic=topic,
image_title=form_image_title,
image_scope=form_image_scope,
map_notes_count=map_notes_count,
)
return render_template(
"image/upload.html",
error=error,
topic_map=topic_map,
topic=topic,
map_notes_count=map_notes_count,
)
@bp.route(
"/images/edit/<map_identifier>/<topic_identifier>/<image_identifier>",
methods=("GET", "POST"),
)
@login_required
def edit(map_identifier, topic_identifier, image_identifier):
store = get_topic_store()
topic_map = store.get_map(map_identifier, current_user.id)
if topic_map is None:
abort(404)
# If the map doesn't belong to the user and they don't have the right
# collaboration mode on the map, then abort
if not topic_map.owner and topic_map.collaboration_mode is not CollaborationMode.EDIT:
abort(403)
topic = store.get_topic(
map_identifier,
topic_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
if topic is None:
abort(404)
image_occurrence = store.get_occurrence(
map_identifier,
image_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
form_image_title = image_occurrence.get_attribute_by_name("title").value
form_image_resource_ref = image_occurrence.resource_ref
form_image_scope = image_occurrence.scope
map_notes_count = store.get_topic_occurrences_statistics(map_identifier, "notes")["note"]
error = 0
if request.method == "POST":
form_image_title = request.form["image-title"].strip()
form_image_scope = request.form["image-scope"].strip()
# If no values have been provided set their default values
if not form_image_scope:
form_image_scope = session["current_scope"]
# Validate form inputs
if not form_image_title:
error = error | 1
if not store.topic_exists(topic_map.identifier, form_image_scope):
error = error | 2
if error != 0:
flash(
"An error occurred when submitting the form. Please review the warnings and fix accordingly.",
"warning",
)
else:
# Update image's title if it has changed
if image_occurrence.get_attribute_by_name("title").value != form_image_title:
store.update_attribute_value(
topic_map.identifier,
image_occurrence.get_attribute_by_name("title").identifier,
form_image_title,
)
# Update image's scope if it has changed
if image_occurrence.scope != form_image_scope:
store.update_occurrence_scope(map_identifier, image_occurrence.identifier, form_image_scope)
flash("Image successfully updated.", "success")
return redirect(
url_for(
"image.index",
map_identifier=topic_map.identifier,
topic_identifier=topic.identifier,
)
)
return render_template(
"image/edit.html",
error=error,
topic_map=topic_map,
topic=topic,
image_identifier=image_occurrence.identifier,
image_title=form_image_title,
image_resource_ref=form_image_resource_ref,
image_scope=form_image_scope,
map_notes_count=map_notes_count,
)
@bp.route(
"/images/delete/<map_identifier>/<topic_identifier>/<image_identifier>",
methods=("GET", "POST"),
)
@login_required
def delete(map_identifier, topic_identifier, image_identifier):
store = get_topic_store()
topic_map = store.get_map(map_identifier, current_user.id)
if topic_map is None:
abort(404)
# If the map doesn't belong to the user and they don't have the right
# collaboration mode on the map, then abort
if not topic_map.owner and topic_map.collaboration_mode is not CollaborationMode.EDIT:
abort(403)
topic = store.get_topic(
map_identifier,
topic_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
if topic is None:
abort(404)
image_occurrence = store.get_occurrence(
map_identifier,
image_identifier,
resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
)
form_image_title = image_occurrence.get_attribute_by_name("title").value
form_image_resource_ref = image_occurrence.resource_ref
form_image_scope = image_occurrence.scope
map_notes_count = store.get_topic_occurrences_statistics(map_identifier, "notes")["note"]
if request.method == "POST":
# Delete image occurrence from topic store
store.delete_occurrence(map_identifier, image_occurrence.identifier)
# Delete image from file system
image_file_path = os.path.join(
current_app.static_folder,
RESOURCES_DIRECTORY,
str(map_identifier),
image_occurrence.resource_ref,
)
if os.path.exists(image_file_path):
os.remove(image_file_path)
flash("Image successfully deleted.", "warning")
return redirect(
url_for(
"image.index",
map_identifier=topic_map.identifier,
topic_identifier=topic.identifier,
)
)
return render_template(
"image/delete.html",
topic_map=topic_map,
topic=topic,
image_identifier=image_occurrence.identifier,
image_title=form_image_title,
image_resource_ref=form_image_resource_ref,
image_scope=form_image_scope,
map_notes_count=map_notes_count,
)
# ========== HELPER METHODS ==========
def get_file_extension(file_name):
return file_name.rsplit(".", 1)[1].lower()
def allowed_file(file_name):
return get_file_extension(file_name) in EXTENSIONS_WHITELIST
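# Quick illustration of the helpers above: extensions are lower-cased before
# being checked against EXTENSIONS_WHITELIST, so mixed-case image names pass
# while anything else is rejected.
#   get_file_extension("diagram.PNG")   # -> "png"
#   allowed_file("diagram.PNG")         # -> True
#   allowed_file("notes.pdf")           # -> False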
|
470fb74ea8a27d81155addad788a63a27b7d1491
|
7e6f0efd6f4733d09e61b4c6658455e6727cd48f
|
/seed/models/analyses.py
|
6c10af2068b35ee79bd2a8499ba079eff93183ac
|
[
"BSD-2-Clause"
] |
permissive
|
SEED-platform/seed
|
0e4a6a2fa93f4c2528d0c295163a91f836a4253d
|
680b6a2b45f3c568d779d8ac86553a0b08c384c8
|
refs/heads/develop
| 2023-09-01T10:46:25.502697
| 2023-08-30T18:44:21
| 2023-08-30T18:44:21
| 25,450,714
| 108
| 75
|
NOASSERTION
| 2023-09-13T22:18:47
| 2014-10-20T04:26:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
analyses.py
|
# !/usr/bin/env python
# encoding: utf-8
"""
SEED Platform (TM), Copyright (c) Alliance for Sustainable Energy, LLC, and other contributors.
See also https://github.com/seed-platform/seed/main/LICENSE.md
"""
import logging
from django.db import models
from seed.analysis_pipelines.utils import get_json_path
from seed.landing.models import SEEDUser as User
from seed.lib.superperms.orgs.models import Organization
logger = logging.getLogger(__name__)
class Analysis(models.Model):
"""
The Analysis represents an analysis performed on one or more properties.
"""
BSYNCR = 1
BETTER = 2
EUI = 3
CO2 = 4
SERVICE_TYPES = (
(BSYNCR, 'BSyncr'),
(BETTER, 'BETTER'),
(EUI, 'EUI'),
(CO2, 'CO2')
)
PENDING_CREATION = 8
CREATING = 10
READY = 20
QUEUED = 30
RUNNING = 40
FAILED = 50
STOPPED = 60
COMPLETED = 70
STATUS_TYPES = (
(PENDING_CREATION, 'Pending Creation'),
(CREATING, 'Creating'),
(READY, 'Ready'),
(QUEUED, 'Queued'),
(RUNNING, 'Running'),
(FAILED, 'Failed'),
(STOPPED, 'Stopped'),
(COMPLETED, 'Completed'),
)
name = models.CharField(max_length=255, blank=False, default=None)
service = models.IntegerField(choices=SERVICE_TYPES)
created_at = models.DateTimeField(auto_now_add=True)
start_time = models.DateTimeField(null=True, blank=True)
end_time = models.DateTimeField(null=True, blank=True)
status = models.IntegerField(default=PENDING_CREATION, choices=STATUS_TYPES)
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
configuration = models.JSONField(default=dict, blank=True)
# parsed_results can contain any results gathered from the resulting file(s)
# that are applicable to the entire analysis (i.e., all properties involved).
# For property-specific results, use the AnalysisPropertyView's parsed_results
parsed_results = models.JSONField(default=dict, blank=True)
def get_property_view_info(self, property_id=None):
if property_id is not None:
analysis_property_views = self.analysispropertyview_set.filter(property=property_id)
else:
analysis_property_views = self.analysispropertyview_set
return {
'number_of_analysis_property_views': self.analysispropertyview_set.count(),
'views': list(analysis_property_views.values_list('id', flat=True).distinct()),
'cycles': list(analysis_property_views.values_list('cycle', flat=True).distinct())
}
def get_highlights(self, property_id=None):
"""Get analysis highlights for the overall analysis or for a specific property
:param property_id: int | None, if provided property-specific highlights
from the analysis results are returned. Otherwise highlights from the
overall analysis are returned.
:return: list[dict{}], a list of highlights as dictionaries, each including
a `name` and `value`
"""
if self.status < self.COMPLETED:
return []
results = {}
if property_id is not None:
try:
results = self.analysispropertyview_set.get(property=property_id).parsed_results
except models.Model.DoesNotExist:
return []
else:
results = self.parsed_results
# BSyncr
if self.service == self.BSYNCR:
return [{'name': 'Completed', 'value': ''}]
# BETTER
elif self.service == self.BETTER:
highlights = [
{
'name': ['Potential Cost Savings (USD)'],
'value_template': ['${json_value:,.2f}'],
'json_path': ['assessment.assessment_energy_use.cost_savings_combined'],
}, {
'name': ['Potential Energy Savings'],
'value_template': ['{json_value:,.2f} kWh'],
'json_path': ['assessment.assessment_energy_use.energy_savings_combined'],
}, {
'name': ['BETTER Inverse Model R^2 (Electricity', 'Fossil Fuel)'],
'value_template': ['{json_value:,.2f}', '{json_value:,.2f}'],
'json_path': ['inverse_model.ELECTRICITY.r2', 'inverse_model.FOSSIL_FUEL.r2'],
}
]
ret = []
for highlight in highlights:
full_name = []
full_value = []
for i, name in enumerate(highlight['name']):
parsed_result = get_json_path(highlight['json_path'][i], results)
value = 'N/A'
if parsed_result is not None:
value = highlight['value_template'][i].format(json_value=parsed_result)
full_name.append(name)
full_value.append(value)
ret.append({
'name': ', '.join(full_name),
'value': ', '.join(full_value)
})
return ret
# EUI
elif self.service == self.EUI:
eui_result = results.get('Fractional EUI (kBtu/sqft)')
value = 'N/A'
if eui_result is not None:
value = f'{eui_result:,.2f}'
coverage = results.get('Annual Coverage %')
if coverage is None:
coverage = 'N/A'
return [
{'name': 'Fractional EUI', 'value': f'{value} kBtu/sqft'},
{'name': 'Annual Coverage', 'value': f'{coverage}%'}
]
# CO2
elif self.service == self.CO2:
co2_result = results.get('Average Annual CO2 (kgCO2e)')
value = 'N/A'
if co2_result is not None:
value = f'{co2_result:,.0f}'
coverage = results.get('Annual Coverage %')
if coverage is None:
coverage = 'N/A'
return [
{'name': 'Average Annual CO2', 'value': f'{value} kgCO2e'},
{'name': 'Annual Coverage', 'value': f'{coverage}%'}
]
# Unexpected
return [{'name': 'Unexpected Analysis Type', 'value': 'Oops!'}]
def in_terminal_state(self):
"""Returns True if the analysis has finished, e.g., stopped, failed,
completed, etc
:returns: bool
"""
return self.status in [self.FAILED, self.STOPPED, self.COMPLETED]
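# Minimal sketch of the EUI branch of get_highlights above with made-up
# parsed_results (values are illustrative only):
#   results = {'Fractional EUI (kBtu/sqft)': 52.3456, 'Annual Coverage %': 87}
#   -> [{'name': 'Fractional EUI', 'value': '52.35 kBtu/sqft'},
#       {'name': 'Annual Coverage', 'value': '87%'}]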
|
36f2038672a5c4d051b01c1e5237b805867dbedf
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/robot-server/robot_server/service/legacy/models/modules.py
|
61c68e3aa4b8ca7c98742f2e98b148e23b2c1d8c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 12,685
|
py
|
modules.py
|
import typing
from pydantic import BaseModel, Field
class TemperatureModuleLiveData(BaseModel):
"""Temperature Module live data"""
currentTemp: float = Field(..., description="The current temperature of the module")
targetTemp: typing.Optional[float] = Field(
..., description="The target temperature of the module if any"
)
class MagneticModuleLiveData(BaseModel):
"""Magnetic Module live data"""
engaged: bool = Field(..., description="Whether the magnets are raised or lowered")
height: float = Field(
...,
description="The height of the top of the magnets relative to "
"their home position, in mm",
)
class ThermocyclerModuleLiveData(BaseModel):
"""Thermocycler live data"""
lid: str = Field(..., description="The current state of the lid")
lidTarget: typing.Optional[float] = Field(
..., description="The target temperature of the lid temperature " "controller"
)
lidTemp: float = Field(..., description="The current temperature of the lid")
currentTemp: float = Field(
..., description="The current temperature of the thermocycler block"
)
targetTemp: typing.Optional[float] = Field(
..., description="The target temperature of the thermocycler block"
)
holdTime: typing.Optional[float] = Field(
...,
description="The time left in the current hold step, if any (in " "seconds)",
)
rampRate: typing.Optional[float] = Field(
...,
description="The current ramp rate (in degC/s) for the " "thermocycler block",
)
currentCycleIndex: typing.Optional[int] = Field(
...,
description="The index of the current cycle within the current "
"programmed sequence",
)
totalCycleCount: typing.Optional[int] = Field(
..., description="The total number of cycles within the current " "sequence"
)
currentStepIndex: typing.Optional[int] = Field(
...,
description="The index of the current step within the current "
"programmed cycle",
)
totalStepCount: typing.Optional[int] = Field(
..., description="The total number of steps within the current cycle"
)
class HeaterShakerModuleLiveData(BaseModel):
"""Heater-Shaker live data"""
labwareLatchStatus: str = Field(
..., description="The current state of the Labware Latch"
)
speedStatus: str = Field(..., description="The current shake speed status")
temperatureStatus: str = Field(..., description="The current temperature status")
currentSpeed: int = Field(..., description="Current shake speed of plate in RPM")
currentTemp: float = Field(..., description="Current plate temperature in Celsius")
targetSpeed: typing.Optional[int] = Field(
..., description="Target shake speed in RPM"
)
targetTemp: typing.Optional[float] = Field(
..., description="Target temperature in Celsius"
)
errorDetails: typing.Optional[str] = Field(
..., description="Module error, if present"
)
# The most specific models must come first: members of a Union are tried in
# order, so they have to go from most to least specific (Heater-Shaker and
# Thermocycler before the simpler Temperature and Magnetic models).
ModuleLiveData = typing.Union[
HeaterShakerModuleLiveData,
ThermocyclerModuleLiveData,
TemperatureModuleLiveData,
MagneticModuleLiveData,
]
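# Hedged illustration (added, not part of the Opentrons API; assumes pydantic
# v1 semantics where Union members are validated left-to-right and extra
# fields are ignored): a Thermocycler payload also satisfies
# TemperatureModuleLiveData, so the more specific model must precede it or
# its extra fields would be silently dropped during validation.
def _parse_live_data_example(payload: dict) -> ModuleLiveData:
    # hypothetical usage; parse_obj_as returns the first Union member that
    # validates, walking the list above in order
    from pydantic import parse_obj_as
    return parse_obj_as(ModuleLiveData, payload)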
class PhysicalPort(BaseModel):
hub: bool = Field(
...,
description="If a physical USB external hub is"
" connected to the raspberry pi",
)
port: int = Field(
...,
description="The USB port the module is plugged into."
" If connected via a hub, ``port`` represents the port the hub is plugged into.",
)
portGroup: str = Field(
...,
description="The physical USB port bank the module is plugged into.",
)
hubPort: typing.Optional[int] = Field(
...,
description="If the module is connected via a USB hub,"
" the port on the hub the module is plugged into.",
)
class Module(BaseModel):
"""An object identifying a module"""
name: str = Field(
...,
description="A machine readable identifying name for a module. "
"Deprecated. Prefer moduleModel",
)
displayName: str = Field(
...,
description="A human-presentable name of the module. Deprecated."
" Prefer lookup in the def",
)
moduleModel: str = Field(
..., description="The model of the module (e.g. magneticModuleV1)"
)
port: str = Field(
...,
description="The virtual port to which the module is attached",
)
usbPort: PhysicalPort = Field(
..., description="The physical port to which the module is attached"
)
serial: str = Field(
...,
description="The unique serial number of the module",
)
model: str = Field(
...,
description="The model identifier (i.e. the part number). "
"Deprecated. Prefer revision",
)
revision: str = Field(
..., description="The hardware identifier (i.e. the part number)"
)
fwVersion: str = Field(
...,
description="The current firmware version",
)
hasAvailableUpdate: bool = Field(
..., description="If set, a module update is available"
)
status: str = Field(
...,
description="A human-readable module-specific status",
)
data: ModuleLiveData
class Modules(BaseModel):
"""A list of all attached modules and the status of each one"""
modules: typing.List[Module]
class Config:
schema_extra = {
"examples": {
"nothingAttached": {
"description": "With no modules present",
"value": {"modules": []},
},
"magneticModuleAttached": {
"description": "With a Magnetic Module attached",
"value": {
"modules": [
{
"name": "magdeck",
"displayName": "Magnetic Module",
"moduleModel": "magneticModuleV1",
"port": "tty01_magdeck",
"serial": "MDV2313121",
"model": "mag_deck_v4.0",
"revision": "mag_deck_v4.0",
"fwVersion": "2.1.3",
"status": "engaged",
"hasAvailableUpdate": True,
"data": {"engaged": True, "height": 10},
}
]
},
},
"tempDeckAttached": {
"description": "With a Temperature Module attached",
"value": {
"modules": [
{
"name": "tempdeck",
"displayName": "Temperature Module",
"moduleModel": "temperatureModuleV1",
"revision": "temp_deck_v10",
"port": "tty2_tempdeck",
"serial": "TDV10231231",
"model": "temp_deck_v10",
"hasAvailableUpdate": False,
"fwVersion": "1.2.0",
"status": "cooling",
"data": {"currentTemp": 25, "targetTemp": 10},
}
]
},
},
"thermocyclerAttached": {
"description": "With a Thermocycler attached",
"value": {
"modules": [
{
"name": "thermocycler",
"displayName": "Thermocycler",
"revision": "thermocycler_v10",
"moduleModel": "thermocyclerModuleV1",
"port": "tty3_thermocycler",
"serial": "TCV1006052018",
"model": "thermocycler_v10",
"hasAvailableUpdate": True,
"fwVersion": "1.0.0",
"status": "cooling",
"data": {
"lid": "closed",
"lidTarget": 10,
"lidTemp": 15,
"currentTemp": 20,
"targetTemp": 10,
"holdTime": None,
"rampRate": 10,
"currentCycleIndex": None,
"totalCycleCount": None,
"currentStepIndex": None,
"totalStepCount": None,
},
}
]
},
},
"heaterShakerAttached": {
"description": "With a Heater-Shaker attached",
"value": {
"modules": [
{
"name": "heatershaker",
"displayName": "heatershaker",
"fwVersion": "0.0.1",
"hasAvailableUpdate": True,
"model": "heater-shaker_v10",
"moduleModel": "heaterShakerModuleV1",
"port": "/dev/ot_module_heatershaker1",
"usbPort": {
"hub": False,
"port": 1,
"portGroup": "unknown",
"hubPort": None,
},
"revision": "heater-shaker_v10",
"serial": "HSnnnnnn",
"status": "running",
"data": {
"temperatureStatus": "heating",
"speedStatus": "holding at target",
"labwareLatchStatus": "closed",
"currentTemp": 25.5,
"targetTemp": 50,
"currentSpeed": 10,
"targetSpeed": 300,
"errorDetails": None,
},
}
]
},
},
}
}
class ModuleSerial(BaseModel):
"""Data from the module"""
status: str = Field(..., description="A human-readable module-specific status")
data: ModuleLiveData
class SerialCommand(BaseModel):
"""The serialized module call"""
command_type: str = Field(
..., description="The name of the module function to call"
)
args: typing.Optional[typing.List[typing.Any]] = Field(
None, description="The ordered args list for the call"
)
class Config:
schema_extra = {
"examples": {
"tempModSetTemp": {
"summary": "Set Temperature Module temperature",
"description": "Set the temperature of an attached "
"Temperature Module",
"value": {"command_type": "set_temperature", "args": [60]},
}
}
}
class SerialCommandResponse(BaseModel):
""" "The result of a successful call"""
message: str = Field(..., description="A human readable string")
returnValue: str = Field(None, description="The return value from the call")
class Config:
schema_extra = {
"examples": {
"tempModSetTemperature": {
"summary": "Set temperature OK",
"description": "A successful call to set_temperature "
"on a Temperature Module",
"value": {"message": "Success", "returnValue": None},
}
}
}
|
e3018a35a4999db3cb19f95507db09bfe33da75f
|
fe90bf63c34511ec9a4d7cb5a90957fbbb03a504
|
/boundary_layer/oozier/jsp_macros.py
|
7b1fd37a25ab5133111d212419db4fd4d93c9e3f
|
[
"Apache-2.0"
] |
permissive
|
etsy/boundary-layer
|
778b115f94efc5d50986a289daf3ad265b38926c
|
c29594957c1fb47e308fcc89f7edcefc0797fc89
|
refs/heads/master
| 2023-07-21T17:03:15.769537
| 2023-01-04T14:05:53
| 2023-01-04T14:05:53
| 142,857,095
| 263
| 67
|
Apache-2.0
| 2023-07-19T19:57:04
| 2018-07-30T09:51:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
jsp_macros.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Etsy Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import six
from six.moves import map
import marshmallow as ma
class JspMacroTranslator(object):
# Oozie macros use JSP Expression Language. I can't find a convenient
# expression evaluator, but there aren't too many things we need to support
# so we can do it with regexes and substitutions
OOZIE_MACRO_RE = re.compile(r'\$\{[^}]+\}')
def __init__(self, macros):
self.macros = macros
def translate(self, item):
if isinstance(item, dict):
return {
key: self.translate(value) for (key, value) in six.iteritems(item)
}
elif isinstance(item, list):
return list(map(self.translate, item))
if not isinstance(item, six.string_types):
raise ma.ValidationError('Cannot translate object {} of type {}'.format(
item,
type(item)))
hits = list(self.OOZIE_MACRO_RE.finditer(item))
if not hits:
return item
split = self.OOZIE_MACRO_RE.split(item)
# Some sanity checking to make sure that the split gave us what we expected.
# Given that we know there's at least one hit, we expect that there should be
# at least one more item in splits than in hits. If the text field both
# begins and ends with a hit, then there can potentially be 2 more items
# in the split array.
if not (2 + len(hits) >= len(split) > len(hits)):
raise ma.ValidationError(
'Bad split on input text `{}`: hits == `{}` / split == `{}`'.format(
item,
hits,
split))
if not (split[-1] == '' or len(split) == 1 + len(hits)):
raise ma.ValidationError(
'Bad split on input text `{}`: hits == `{}` / split == `{}`'.format(
item,
hits,
split))
result = split[0]
for (idx, hit) in enumerate(hits):
# Must discard the first 2 and final 1 characters from the hit, because
# these represent the '${' and '}'. Note that we could use capture groups
# in the regex but then these will contaminate the output of the split()
# function (see: https://docs.python.org/2/library/re.html#re.split)
assert hit.group(0).startswith('${') and hit.group(0).endswith('}')
oozie_macro = hit.group(0)[2:-1]
translation = self.macros.get(oozie_macro)
if translation is None:
raise ma.ValidationError(
'Could not translate {}: unrecognized macro `{}`'.format(item, oozie_macro))
# if we've made it here, either translation was successful or
# unsuccessful translations are permitted. In the latter case,
# we reinsert the oozie macro.
result += translation or hit.group(0)
result += split[1 + idx]
return result
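# Hedged usage sketch (added; the macro mapping is hypothetical, not part of
# boundary-layer): translate() replaces every ${...} expression matched by
# OOZIE_MACRO_RE with its configured substitution.
if __name__ == '__main__':
    _translator = JspMacroTranslator({'wf:id()': '{{ run_id }}'})
    assert _translator.translate('job ${wf:id()} done') == 'job {{ run_id }} done'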
|
3a122ad29266fe29236918b60fa25c167b760135
|
168f6e03f82b9a6530ce174659c218a162248b4d
|
/minidump/utils/winapi/psapi.py
|
6cfc9965bdd4a7d21a07b63bacd30beb787d57e6
|
[
"MIT"
] |
permissive
|
skelsec/minidump
|
c35a07846540c345d73e72c7e39525e42ba99662
|
069422590a5856eec8bfcf174b55a45442b8942f
|
refs/heads/master
| 2023-04-07T18:58:30.406970
| 2023-02-21T15:35:03
| 2023-02-21T15:35:03
| 134,586,242
| 226
| 50
|
MIT
| 2023-03-20T17:39:12
| 2018-05-23T15:02:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
psapi.py
|
from minidump.utils.winapi.defines import *
# typedef struct _MODULEINFO {
# LPVOID lpBaseOfDll;
# DWORD SizeOfImage;
# LPVOID EntryPoint;
# } MODULEINFO, *LPMODULEINFO;
class MODULEINFO(Structure):
_fields_ = [
("lpBaseOfDll", LPVOID), # remote pointer
("SizeOfImage", DWORD),
("EntryPoint", LPVOID), # remote pointer
]
LPMODULEINFO = POINTER(MODULEINFO)
# BOOL WINAPI EnumProcessModules(
# __in HANDLE hProcess,
# __out HMODULE *lphModule,
# __in DWORD cb,
# __out LPDWORD lpcbNeeded
# );
def EnumProcessModules(hProcess):
_EnumProcessModules = windll.psapi.EnumProcessModules
_EnumProcessModules.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD]
_EnumProcessModules.restype = bool
_EnumProcessModules.errcheck = RaiseIfZero
size = 0x1000
lpcbNeeded = DWORD(size)
unit = sizeof(HMODULE)
while True:
lphModule = (HMODULE * (size // unit))()
_EnumProcessModules(hProcess, byref(lphModule), lpcbNeeded, byref(lpcbNeeded))
needed = lpcbNeeded.value
if needed <= size:
break
size = needed
return [ lphModule[index] for index in range(0, int(needed // unit)) ]
def GetModuleFileNameExW(hProcess, hModule = None):
_GetModuleFileNameExW = ctypes.windll.psapi.GetModuleFileNameExW
_GetModuleFileNameExW.argtypes = [HANDLE, HMODULE, LPWSTR, DWORD]
_GetModuleFileNameExW.restype = DWORD
nSize = MAX_PATH
while True:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetModuleFileNameExW(hProcess, hModule, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
# BOOL WINAPI GetModuleInformation(
# __in HANDLE hProcess,
# __in HMODULE hModule,
# __out LPMODULEINFO lpmodinfo,
# __in DWORD cb
# );
def GetModuleInformation(hProcess, hModule, lpmodinfo = None):
_GetModuleInformation = windll.psapi.GetModuleInformation
_GetModuleInformation.argtypes = [HANDLE, HMODULE, LPMODULEINFO, DWORD]
_GetModuleInformation.restype = bool
_GetModuleInformation.errcheck = RaiseIfZero
if lpmodinfo is None:
lpmodinfo = MODULEINFO()
_GetModuleInformation(hProcess, hModule, byref(lpmodinfo), sizeof(lpmodinfo))
return lpmodinfo
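# Hedged usage sketch (added; Windows-only): enumerate the modules loaded in
# the current process via the kernel32 pseudo-handle and print each image
# path. Acquiring the handle through GetCurrentProcess is an assumption of
# this example, not something the module above requires.
if __name__ == '__main__':
    hProcess = ctypes.windll.kernel32.GetCurrentProcess()
    for hModule in EnumProcessModules(hProcess):
        print(GetModuleFileNameExW(hProcess, hModule))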
|
a73066bd337fb81b5327b25616a522daa1c47a11
|
fdfffa8cacb572a157ead4a9723f90b25ecfe50c
|
/examples/redis/redis-example.py
|
0a2eac5af1714367f073b293e282f6fd690805ab
|
[
"Apache-2.0",
"LicenseRef-scancode-gutenberg-2020",
"CC0-1.0",
"BSD-3-Clause",
"LGPL-2.0-or-later"
] |
permissive
|
apache/ignite
|
0bc83435a8db46d9c4df000fe05b1c70165b37d4
|
dbf1c7825d74809cd6859c85a8ac9ed9ac071e39
|
refs/heads/master
| 2023-08-31T21:31:04.618489
| 2023-08-31T19:43:09
| 2023-08-31T19:43:09
| 31,006,158
| 4,806
| 2,308
|
Apache-2.0
| 2023-09-14T18:56:33
| 2015-02-19T08:00:05
|
Java
|
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
redis-example.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import redis
'''
To execute this script, run an Ignite instance with 'redis-ignite-internal-cache-0' cache specified and configured.
You will also need to have 'redis-py' installed.
See https://github.com/andymccurdy/redis-py for the details on redis-py.
'''
r = redis.StrictRedis(host='localhost', port=11211, db=0)
# set entry.
r.set('k1', 1)
# check.
print('Value for "k1": %s' % r.get('k1'))
# change entry's value.
r.set('k1', 'new_val')
# check.
print('Value for "k1": %s' % r.get('k1'))
# set another entry.
r.set('k2', 2)
# check.
print('Value for "k2": %s' % r.get('k2'))
# get both values.
print('Values for "k1" and "k2": %s' % r.mget('k1', 'k2'))
# delete one entry.
r.delete('k1')
# check one entry left.
print('Values for "k1" and "k2": %s' % r.mget('k1', 'k2'))
# check db size
print('Db size: %d' % r.dbsize())
# increment.
print('Value for incremented "inc_k" : %s' % r.incr('inc_k'))
# increment again.
print('Value for incremented "inc_k" : %s' % r.incr('inc_k'))
|
9fe83268b1205ba045715121989c247b31888975
|
af6484154df8451750cd9cbce0f62f549c100b55
|
/models/vision/detection/configs/faster_rcnn/SM/CI/4/faster_rcnn_r50v1_d_fpn_1x_coco.py
|
94a5ae63f7b98b6eceaf6bb193178b60651f0cdc
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws-samples/deep-learning-models
|
375cd60d45379a290c77dc91142999278d12e8ad
|
0a852badc1004b9ae32a3a1c44bdec1ff9604329
|
refs/heads/master
| 2023-01-06T17:04:26.841548
| 2023-01-04T20:03:37
| 2023-01-04T20:03:37
| 141,620,532
| 141
| 80
|
NOASSERTION
| 2023-01-05T20:31:25
| 2018-07-19T19:07:10
|
Python
|
UTF-8
|
Python
| false
| false
| 61
|
py
|
faster_rcnn_r50v1_d_fpn_1x_coco.py
|
base_files = ['../../4/faster_rcnn_r50v1_d_fpn_1x_coco.py']
|
adf735fc8e42ba430bcf85f25527508dfb3b8739
|
9a2bbc25016326b3b1da275e3b9d9a3c5c5878a6
|
/project/game/ai/kan.py
|
2b226add2e6dc22cd338f6879c48e00355687a00
|
[
"MIT"
] |
permissive
|
MahjongRepository/tenhou-python-bot
|
3daabf510d58dfe7525bccf0df1575f027b632d2
|
112b08faab08ee862813de06cb5acc5db1c4feb0
|
refs/heads/dev
| 2023-07-20T14:51:02.101557
| 2023-07-08T10:11:47
| 2023-07-08T10:11:47
| 56,445,019
| 217
| 75
|
MIT
| 2023-05-23T02:05:41
| 2016-04-17T15:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,687
|
py
|
kan.py
|
from typing import Optional
import utils.decisions_constants as log
from mahjong.tile import TilesConverter
from mahjong.utils import is_pon
from utils.decisions_logger import MeldPrint
class Kan:
def __init__(self, player):
self.player = player
# TODO for better readability need to separate it on three methods:
# should_call_closed_kan, should_call_open_kan, should_call_shouminkan
def should_call_kan(self, tile_136: int, open_kan: bool, from_riichi=False) -> Optional[str]:
"""
Method will decide should we call a kan, or upgrade pon to kan
:return: kan type
"""
# we can't call kan on the latest tile
if self.player.table.count_of_remaining_tiles <= 1:
return None
if self.player.config.FEATURE_DEFENCE_ENABLED:
threats = self.player.ai.defence.get_threatening_players()
else:
threats = []
if open_kan:
# we don't want to open our hand just for a called kan
if not self.player.is_open_hand:
return None
# there is no sense to call open kan when we are not in tempai
if not self.player.in_tempai:
return None
# we have a bad wait, rinshan chance is low
if len(self.player.ai.waiting) < 2 or self.player.ai.ukeire < 5:
return None
# there are threats, open kan is probably a bad idea
if threats:
return None
tile_34 = tile_136 // 4
tiles_34 = TilesConverter.to_34_array(self.player.tiles)
# save original hand state
original_tiles = self.player.tiles[:]
new_shanten = 0
previous_shanten = 0
new_waits_count = 0
previous_waits_count = 0
# let's check can we upgrade opened pon to the kan
pon_melds = [x for x in self.player.meld_34_tiles if is_pon(x)]
has_shouminkan_candidate = False
for meld in pon_melds:
# tile is equal to our already opened pon
if tile_34 in meld:
has_shouminkan_candidate = True
closed_hand_34 = TilesConverter.to_34_array(self.player.closed_hand)
previous_shanten, previous_waits_count = self._calculate_shanten_for_kan()
self.player.tiles = original_tiles[:]
closed_hand_34[tile_34] -= 1
tiles_34[tile_34] -= 1
new_waiting, new_shanten = self.player.ai.hand_builder.calculate_waits(
closed_hand_34, tiles_34, use_chiitoitsu=False
)
new_waits_count = self.player.ai.hand_builder.count_tiles(new_waiting, closed_hand_34)
closed_hand_34 = TilesConverter.to_34_array(self.player.closed_hand)
if not open_kan and not has_shouminkan_candidate and closed_hand_34[tile_34] != 4:
return None
if open_kan and closed_hand_34[tile_34] != 3:
return None
closed_hand_34 = TilesConverter.to_34_array(self.player.closed_hand)
tiles_34 = TilesConverter.to_34_array(self.player.tiles)
if not has_shouminkan_candidate:
if open_kan:
# these 4 tiles can only be used in a kan, no other options
previous_waiting, previous_shanten = self.player.ai.hand_builder.calculate_waits(
closed_hand_34, tiles_34, use_chiitoitsu=False
)
previous_waits_count = self.player.ai.hand_builder.count_tiles(previous_waiting, closed_hand_34)
elif from_riichi:
# hand did not change since we last recalculated it, and the only thing we can do is to call kan
previous_waits_count = self.player.ai.ukeire
else:
previous_shanten, previous_waits_count = self._calculate_shanten_for_kan()
self.player.tiles = original_tiles[:]
closed_hand_34[tile_34] = 0
new_waiting, new_shanten = self.player.ai.hand_builder.calculate_waits(
closed_hand_34, tiles_34, use_chiitoitsu=False
)
closed_hand_34[tile_34] = 4
new_waits_count = self.player.ai.hand_builder.count_tiles(new_waiting, closed_hand_34)
# it is possible that we don't have results here
# when we are in agari state (but without yaku)
if previous_shanten is None:
return None
# it is not possible to reduce number of shanten by calling a kan
assert new_shanten >= previous_shanten
# if shanten number is the same, we should only call kan if ukeire didn't become worse
if new_shanten == previous_shanten:
# we cannot improve ukeire by calling kan (not considering the tile we drew from the dead wall)
assert new_waits_count <= previous_waits_count
if new_waits_count == previous_waits_count:
kan_type = has_shouminkan_candidate and MeldPrint.SHOUMINKAN or MeldPrint.KAN
if kan_type == MeldPrint.SHOUMINKAN:
if threats:
# there are threats and we are not even in tempai - let's not do shouminkan
if not self.player.in_tempai:
return None
# there are threats and our tempai is weak, let's not do shouminkan
if len(self.player.ai.waiting) < 2 or self.player.ai.ukeire < 3:
return None
else:
# no threats, but too many shanten, let's not do shouminkan
if new_shanten > 2:
return None
# no threats, and ryanshanten, but ukeire is meh, let's not do shouminkan
if new_shanten == 2:
if self.player.ai.ukeire < 16:
return None
self.player.logger.debug(log.KAN_DEBUG, f"Open kan type='{kan_type}'")
return kan_type
return None
def _calculate_shanten_for_kan(self):
previous_results, previous_shanten = self.player.ai.hand_builder.find_discard_options()
previous_results = [x for x in previous_results if x.shanten == previous_shanten]
# it is possible that we don't have results here
# when we are in agari state (but without yaku)
if not previous_results:
return None, None
previous_waits_cnt = sorted(previous_results, key=lambda x: -x.ukeire)[0].ukeire
return previous_shanten, previous_waits_cnt
|
b110b81f00eb474068046957f672e2264f4a0612
|
bed34365a9dab825fd9f4a4ff1b0863f441266ac
|
/neutron/agent/linux/external_process.py
|
f1e902eeb187e2cc3ff29b5851a974bc10561c55
|
[
"Apache-2.0"
] |
permissive
|
openstack/neutron
|
0913ee3cd69d5bdb9c10aa084d4e1803abee320c
|
dde31aae392b80341f6440eb38db1583563d7d1f
|
refs/heads/master
| 2023-08-31T13:09:41.831598
| 2023-08-31T11:37:30
| 2023-08-31T11:37:30
| 2,400,289
| 1,174
| 1,325
|
Apache-2.0
| 2022-06-29T08:00:05
| 2011-09-16T16:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 11,420
|
py
|
external_process.py
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os.path
import eventlet
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_cfg
LOG = logging.getLogger(__name__)
PROCESS_TAG = 'PROCESS_TAG'
DEFAULT_SERVICE_NAME = 'default-service'
agent_cfg.register_external_process_opts()
agent_cfg.register_process_monitor_opts(cfg.CONF)
class MonitoredProcess(object, metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def active(self):
"""Boolean representing the running state of the process."""
@abc.abstractmethod
def enable(self):
"""Enable the service, or respawn the process."""
class ProcessManager(MonitoredProcess):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, namespace=None, service=None,
pids_path=None, default_cmd_callback=None,
cmd_addl_env=None, pid_file=None, run_as_root=False,
custom_reload_callback=None):
self.conf = conf
self.uuid = uuid
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.pids_path = pids_path or self.conf.external_pids
self.pid_file = pid_file
self.run_as_root = run_as_root or self.namespace is not None
self.custom_reload_callback = custom_reload_callback
self.kill_scripts_path = cfg.CONF.AGENT.kill_scripts_path
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = DEFAULT_SERVICE_NAME
process_tag = '%s-%s' % (self.service, self.uuid)
self.cmd_addl_env = cmd_addl_env or {}
self.cmd_addl_env[PROCESS_TAG] = process_tag
fileutils.ensure_tree(os.path.dirname(self.get_pid_file_name()),
mode=0o755)
def enable(self, cmd_callback=None, reload_cfg=False, ensure_active=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root)
elif reload_cfg:
self.reload_cfg()
if ensure_active:
common_utils.wait_until_true(lambda: self.active)
def reload_cfg(self):
if self.custom_reload_callback:
self.disable(get_stop_command=self.custom_reload_callback)
else:
self.disable('HUP')
def disable(self, sig='9', get_stop_command=None):
pid = self.pid
if self.active:
if get_stop_command:
cmd = get_stop_command(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root,
privsep_exec=True)
else:
cmd = self.get_kill_cmd(sig, pid)
utils.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root,
privsep_exec=True)
# In the case of shutting down, remove the pid file
if sig == '9':
utils.delete_if_exists(self.get_pid_file_name(),
run_as_root=self.run_as_root)
elif pid:
LOG.debug('%(service)s process for %(uuid)s pid %(pid)d is stale, '
'ignoring signal %(signal)s',
{'service': self.service, 'uuid': self.uuid,
'pid': pid, 'signal': sig})
else:
LOG.debug('No %(service)s process started for %(uuid)s',
{'service': self.service, 'uuid': self.uuid})
def get_kill_cmd(self, sig, pid):
if self.kill_scripts_path:
kill_file = "%s-kill" % self.service
kill_file_path = os.path.join(self.kill_scripts_path, kill_file)
if os.path.isfile(kill_file_path):
return [kill_file_path, sig, pid]
return ['kill', '-%s' % (sig), pid]
def get_pid_file_name(self):
"""Returns the file name for a given kind of config file."""
if self.pid_file:
return self.pid_file
else:
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_file(self.get_pid_file_name(), int)
@property
def active(self):
cmdline = self.cmdline
return self.uuid in cmdline if cmdline else False
@property
def cmdline(self):
pid = self.pid
if not pid:
return
try:
return ' '.join(psutil.Process(pid).cmdline())
except (psutil.NoSuchProcess, psutil.AccessDenied):
return
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, resource_type):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo_config.ConfigOpts
:param resource_type: can be dhcp, router, etc.
:type resource_type: str
"""
self._config = config
self._resource_type = resource_type
self._monitored_processes = {}
if self._config.AGENT.check_child_processes_interval:
self._spawn_checking_thread()
def register(self, uuid, service_name, monitored_process):
"""Start monitoring a process.
The given monitored_process will be tied to its uuid+service_name,
replacing the old one if it already existed.
The monitored_process should be enabled before registration; otherwise,
ProcessMonitor could try to enable the process itself, which could lead
to a double enable and, with bad luck, two running processes as well as
errors in the logs.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
:param monitored_process: MonitoredProcess we want to monitor.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes[service_id] = monitored_process
def unregister(self, uuid, service_name):
"""Stop monitoring a process.
The uuid+service_name will be removed from the monitored processes.
The service must be disabled **after** unregistering; otherwise, if the
process monitor checks after you disable the process but before you
unregister it, the process will be respawned and left orphaned in the
system.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes.pop(service_id, None)
def stop(self):
"""Stop the process monitoring.
This method will stop the monitoring thread, but no monitored
process will be stopped.
"""
self._monitor_processes = False
def _spawn_checking_thread(self):
self._monitor_processes = True
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
# we build the list of keys before iterating in the loop to cover
# the case where other threads add or remove items from the
# dictionary which otherwise will cause a RuntimeError
for service_id in list(self._monitored_processes):
pm = self._monitored_processes.get(service_id)
if pm and not pm.active:
LOG.error("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died",
{'service': service_id.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while self._monitor_processes:
eventlet.sleep(self._config.AGENT.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action = self._config.AGENT.check_child_processes_action
action_function = getattr(self, "_%s_action" % action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.warning("Respawning %(service)s for uuid %(uuid)s",
{'service': service_id.service,
'uuid': service_id.uuid})
self._monitored_processes[service_id].enable()
def _exit_action(self, service_id):
LOG.error("Exiting agent as programmed in check_child_processes_"
"actions")
self._exit_handler(service_id.uuid, service_id.service)
def _exit_handler(self, uuid, service):
"""This is an exit handler for the ProcessMonitor.
It will be called if the administrator configured the exit action in
check_child_processes_actions, and one of our external processes die
unexpectedly.
"""
LOG.error("Exiting agent because of a malfunction with the "
"%(service)s process identified by uuid %(uuid)s",
{'service': service, 'uuid': uuid})
raise SystemExit(1)
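# Hedged usage sketch (added; the uuid, service name and command are
# hypothetical): a ProcessManager is enabled first and only then handed to
# the ProcessMonitor, matching the register() contract documented above.
def _example_monitor_usage(conf):
    pm = ProcessManager(conf, uuid='router-1', service='metadata-proxy',
                        default_cmd_callback=lambda pid_file: ['sleep', '300'])
    pm.enable()
    monitor = ProcessMonitor(config=conf, resource_type='router')
    monitor.register(uuid='router-1', service_name='metadata-proxy',
                     monitored_process=pm)
    return monitor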
|
2e8d8715a2ba3e1e62d4b5b02b643a249e104143
|
a5622dafafd782af153be2bc0bd19cb086fd07b2
|
/tests/integration_tests/tests/agent_tests/test_agent_install_workflow.py
|
234c16823f2569058ed1fc2228899ad579409b79
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-manager
|
8b2d226ad5a9dd8103d7690b2f8081bef24078e1
|
c0de6442e1d7653fad824d75e571802a74eee605
|
refs/heads/master
| 2023-09-06T09:11:51.753912
| 2023-09-04T08:01:58
| 2023-09-04T08:01:58
| 18,326,574
| 146
| 84
|
Apache-2.0
| 2023-09-04T08:02:00
| 2014-04-01T11:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 9,979
|
py
|
test_agent_install_workflow.py
|
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import pytest
import retrying
from integration_tests import AgentTestWithPlugins
from integration_tests.tests.utils import get_resource as resource
pytestmark = pytest.mark.group_agents
@pytest.mark.usefixtures('dockercompute_plugin')
class TestWorkflow(AgentTestWithPlugins):
def _get_queues(self, vhost=None):
cmd = ['rabbitmqctl', 'list_queues', '-s']
if vhost:
cmd += ['-p', vhost]
output = self.env.execute_on_manager(cmd)
return {line.split()[0] for line in output.splitlines()}
def _get_exchanges(self, vhost=None):
cmd = ['rabbitmqctl', 'list_exchanges', '-s']
if vhost:
cmd += ['-p', vhost]
output = self.env.execute_on_manager(cmd)
return {line.split()[0] for line in output.splitlines()}
def test_amqp_queues_list(self):
"""There's no additional queues after uninstalling the agent.
We've seen queue leaks in the past, where queues or exchanges
were not deleted. Check that uninstalling the agent, also removes
its AMQP resources.
"""
vhost = 'rabbitmq_vhost_default_tenant'
deployment_id = 'd{0}'.format(uuid.uuid4())
main_queues = self._get_queues()
main_exchanges = self._get_exchanges()
tenant_queues = self._get_queues(vhost)
tenant_exchanges = self._get_exchanges(vhost)
self.deploy_application(
resource('dsl/agent_tests/with_agent.yaml'),
deployment_id=deployment_id
)
# retrying these assertions (and the post-undeploy ones) because
# removing queues in our BlockingRequestResponseHandler in AMQP
# isn't synchronous, and we might just get more queues than we
# expected - this will however converge very quickly
# (normally sub-second)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=10)
def _post_deploy_assertions():
# installing the agent does nothing for the / vhost
assert self._get_queues() == main_queues
assert self._get_exchanges() == main_exchanges
# after installing the agent, there's 2 new queues and at least
# 1 new exchange
agent_queues = self._get_queues(vhost) - tenant_queues
agent_exchanges = self._get_exchanges(vhost) - tenant_exchanges
assert len(agent_queues) == 2, (
"expected 2 agent queues, but found {0}: {1}"
.format(len(agent_queues), agent_queues)
)
assert any(queue.endswith('_service') for queue in agent_queues)
assert any(queue.endswith('_operation') for queue in agent_queues)
assert any(exc.startswith('agent_host') for exc in agent_exchanges)
# we already checked that there's an agent exchange, but there
# might also exist a logs exchange and an events exchange,
# depending if any events or logs were sent or not
assert len(agent_exchanges) in (1, 2, 3)
_post_deploy_assertions()
self.undeploy_application(deployment_id)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=10)
def _post_undeploy_assertions():
main_queues = self._get_queues()
main_exchanges = self._get_exchanges()
tenant_queues = self._get_queues(vhost)
agent_exchanges = self._get_exchanges(vhost) - tenant_exchanges
# after uninstalling the agent, there's still no new queues on
# the / vhost
assert self._get_queues() == main_queues
assert self._get_exchanges() == main_exchanges
# there's no queues left over
assert self._get_queues(vhost) == tenant_queues
# the logs and events exchanges will still exist, but the agent
# exchange must have been deleted
assert not any(exc.startswith('agent_host')
for exc in agent_exchanges)
_post_undeploy_assertions()
def test_deploy_with_agent_worker(self):
# In 4.2, the default (remote) agent installation path only requires
# the "create" operation
install_events = [
"Task succeeded 'cloudify_agent.installer.operations.create'"
]
uninstall_events = [
"Task succeeded 'cloudify_agent.installer.operations.delete'"
]
self._test_deploy_with_agent_worker(
'dsl/agent_tests/with_agent.yaml',
install_events,
uninstall_events
)
def _test_deploy_with_agent_worker(self,
blueprint,
install_events,
uninstall_events):
deployment_id = 'd{0}'.format(uuid.uuid4())
dsl_path = resource(blueprint)
_, execution_id = self.deploy_application(
dsl_path,
deployment_id=deployment_id
)
events = self.client.events.list(execution_id=execution_id,
sort='timestamp')
filtered_events = [event['message'] for event in events if
event['message'] in install_events]
# Make sure the install events were called (in the correct order)
self.assertListEqual(install_events, filtered_events)
execution_id = self.undeploy_application(deployment_id)
events = self.client.events.list(execution_id=execution_id,
sort='timestamp')
filtered_events = [event['message'] for event in events if
event['message'] in uninstall_events]
# Make sure the uninstall events were called (in the correct order)
self.assertListEqual(uninstall_events, filtered_events)
@pytest.mark.usefixtures('target_aware_mock_plugin')
def test_deploy_with_operation_executor_override(self):
setup_deployment_id = 'd{0}'.format(uuid.uuid4())
dsl_path = resource('dsl/agent_tests/operation_executor_override.yaml')
_, execution_id = self.deploy_application(
dsl_path,
deployment_id=setup_deployment_id
)
webserver_nodes = self.client.node_instances.list(
deployment_id=setup_deployment_id,
node_id='webserver'
)
self.assertEqual(1, len(webserver_nodes))
webserver_node = webserver_nodes[0]
webserver_host_node = self.client.node_instances.list(
deployment_id=setup_deployment_id,
node_id='webserver_host'
)[0]
create_invocation = webserver_node.runtime_properties['create']
expected_create_invocation = {'target': webserver_host_node.id}
self.assertEqual(expected_create_invocation, create_invocation)
start_invocation = webserver_node.runtime_properties['start']
expected_start_invocation = {'target': 'cloudify.management'}
self.assertEqual(expected_start_invocation, start_invocation)
def test_script_executor(self):
"""Check that script-plugin scripts use the correct executor.
When the executor is not provided, the 'auto' executor kind will
detect whether the node-instance is on an agent or not, and
run its operations on the relevant executor.
In this case, we have a node type that has two instances - one
contained_in a compute, and one that isn't. The one in a compute
does run on the agent, and the other one runs on the mgmtworker.
"""
bp = """
tosca_definitions_version: cloudify_dsl_1_5
imports:
- cloudify/types/types.yaml
- plugin:dockercompute
node_types:
t1:
derived_from: cloudify.nodes.Root
interfaces:
cloudify.interfaces.lifecycle:
create: |
#!/usr/bin/env bash
ctx instance runtime-properties create "${AGENT_NAME:-mgmtworker}"
start: |
import os
from cloudify import ctx
ctx.instance.runtime_properties['start'] = \
os.environ.get('AGENT_NAME') or 'mgmtworker'
node_templates:
agent_host:
type: cloudify.nodes.docker.Compute
n1:
type: t1
relationships:
- type: cloudify.relationships.contained_in
target: agent_host
n2:
type: t1
""" # NOQA
self.upload_blueprint_resource(
self.make_yaml_file(bp),
blueprint_id='bp1',
)
dep, _ = self.deploy_application(self.make_yaml_file(bp))
agents = self.client.agents.list()
n1_inst = self.client.node_instances.list(
deployment_id=dep.id, node_id='n1')
n2_inst = self.client.node_instances.list(
deployment_id=dep.id, node_id='n2')
self.undeploy_application(dep.id)
assert len(agents) == 1
assert len(n1_inst) == 1
assert len(n2_inst) == 1
agent_id = agents[0].id
assert n1_inst[0].runtime_properties == \
{'create': agent_id, 'start': agent_id}
assert n2_inst[0].runtime_properties == \
{'create': 'mgmtworker', 'start': 'mgmtworker'}
|
8a844c8ce02fca8e64eb10d67fb8b764a19d4c3f
|
95b4a15808b9c412c8364db80fd619a65dd587e0
|
/docs/userguide/tutorials/geometry/curves_and_surfaces/curve_from_circle.py
|
8ecff71057d8d7f06ed3f66f3cf2cf4cb357ca74
|
[
"MIT"
] |
permissive
|
compas-dev/compas
|
11d5c4d9afd554833297b4a5dbe6a975e6940ce3
|
486e2e9332553240bcbd80e100d26bff58071709
|
refs/heads/main
| 2023-08-31T15:49:32.430570
| 2023-08-17T10:19:52
| 2023-08-17T10:19:52
| 104,857,648
| 286
| 116
|
MIT
| 2023-09-12T13:53:36
| 2017-09-26T08:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
curve_from_circle.py
|
from compas.geometry import Vector, Point, Plane
from compas.geometry import Polyline
from compas.geometry import Circle
from compas.geometry import NurbsCurve
from compas.artists import Artist
from compas.colors import Color
circle = Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 1.0)
curve = NurbsCurve.from_circle(circle)
# ==============================================================================
# Visualisation
# ==============================================================================
Artist.clear()
Artist(curve).draw(color=Color.green())
Artist(Polyline(curve.points)).draw(show_points=True)
Artist.redraw()
|
229e3581bb1c81b82b3cef07b865d02fb69afc5d
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/osf_tests/test_validators.py
|
dc5613893560a01bf19ffb9cb3652071dc8f95ad
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
test_validators.py
|
import pytest
from osf.exceptions import ValidationValueError
from osf.models import validators
from osf_tests.factories import SubjectFactory
# Ported from tests/framework/test_mongo.py
def test_string_required_passes_with_string():
assert validators.string_required('Hi!') is True
def test_string_required_fails_when_empty():
with pytest.raises(ValidationValueError):
validators.string_required(None)
with pytest.raises(ValidationValueError):
validators.string_required('')
@pytest.mark.django_db
def test_validate_expand_subject_hierarchy():
fruit = SubjectFactory()
apple = SubjectFactory(parent=fruit)
grapes = SubjectFactory(parent=fruit)
raisins = SubjectFactory(parent=grapes)
# test send in two flattened hierarchies that share a base
subject_list = [fruit._id, apple._id, grapes._id]
expanded = validators.expand_subject_hierarchy(subject_list)
assert len(expanded) == 3
assert fruit in expanded
assert apple in expanded
assert grapes in expanded
# test send in third level of a 3-tier hierarchy
subject_list = [raisins._id]
expanded = validators.expand_subject_hierarchy(subject_list)
assert len(expanded) == 3
assert raisins in expanded
assert grapes in expanded
assert fruit in expanded
# test send in first and third levels
subject_list = [raisins._id, fruit._id]
expanded = validators.expand_subject_hierarchy(subject_list)
assert len(expanded) == 3
assert raisins in expanded
assert grapes in expanded
assert fruit in expanded
# test invalid hierarchy
subject_list = [fruit._id, '12345_bad_id']
with pytest.raises(ValidationValueError):
validators.expand_subject_hierarchy(subject_list)
|
e099fe690cfca8f7dfb6161707857dd923c42b87
|
b8c1ee67d1e770ca0a49771751df8cef6a5b6935
|
/tests/test_benchmarks.py
|
be6e33ddeea91307d1e3c2bf51da57fb868c9281
|
[
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] |
permissive
|
PyThaiNLP/pythainlp
|
2922c6e4723f1828d39793eb722dc163d141c4f2
|
43cd4f8029d2d9b0d3fd1e4cc30faab7e8052eeb
|
refs/heads/dev
| 2023-09-04T03:42:44.488153
| 2023-08-20T04:10:28
| 2023-08-20T04:10:28
| 61,813,823
| 761
| 264
|
Apache-2.0
| 2023-08-20T04:10:30
| 2016-06-23T14:57:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
test_benchmarks.py
|
import unittest
import numpy as np
import yaml
from pythainlp.benchmarks import word_tokenization
with open("./tests/data/sentences.yml", "r", encoding="utf8") as stream:
TEST_DATA = yaml.safe_load(stream)
class TestBenchmarksPackage(unittest.TestCase):
def test_preprocessing(self):
self.assertIsNotNone(
word_tokenization.preprocessing(
txt="ทดสอบ การ ทำ ความสะอาด ข้อมูล<tag>ok</tag>"
)
)
def test_benchmark_not_none(self):
self.assertIsNotNone(
word_tokenization.benchmark(
["วัน", "จัน", "ทร์", "สี", "เหลือง"],
["วัน", "จันทร์", "สี", "เหลือง"],
)
)
def test_binary_representation(self):
sentence = "อากาศ|ร้อน|มาก|ครับ"
rept = word_tokenization._binary_representation(sentence)
self.assertEqual(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0], rept.tolist()
)
def test_compute_stats(self):
for pair in TEST_DATA["sentences"]:
exp, act = pair["expected"], pair["actual"]
result = word_tokenization.compute_stats(
word_tokenization.preprocessing(exp),
word_tokenization.preprocessing(act),
)
self.assertIsNotNone(result)
def test_benchmark(self):
expected = []
actual = []
for pair in TEST_DATA["sentences"]:
expected.append(pair["expected"])
actual.append(pair["actual"])
df = word_tokenization.benchmark(expected, actual)
self.assertIsNotNone(df)
def test_count_correctly_tokenised_words(self):
for d in TEST_DATA["binary_sentences"]:
sample = np.array(list(d["actual"])).astype(int)
ref_sample = np.array(list(d["expected"])).astype(int)
sb = list(word_tokenization._find_word_boudaries(sample))
rb = list(word_tokenization._find_word_boudaries(ref_sample))
# in binary [{0, 1}, ...]
correctly_tokenized_words = word_tokenization._find_words_correctly_tokenised(
rb, sb
)
self.assertEqual(
np.sum(correctly_tokenized_words), d["expected_count"]
)
def test_words_correctly_tokenised(self):
r = [(0, 2), (2, 10), (10, 12)]
s = [(0, 10), (10, 12)]
expected = "01"
labels = word_tokenization._find_words_correctly_tokenised(r, s)
self.assertEqual(expected, "".join(np.array(labels).astype(str)))
def test_flatten_result(self):
result = dict(key1=dict(v1=6), key2=dict(v2=7))
actual = word_tokenization._flatten_result(result)
self.assertEqual(actual, {"key1:v1": 6, "key2:v2": 7})
|
e58c6075bc67908b6b902e7661c4bad3abdc410e
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py
|
4a0d74d6dc933552fefd47e0e950749079a627fb
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,050
|
py
|
lstm_saliency_pruner.py
|
from typing import cast
import torch
from .base_structured_sparsifier import BaseStructuredSparsifier, FakeStructuredSparsity
class LSTMSaliencyPruner(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} inside a LSTM, we have two packed weight matrices
- weight_ih_l{k}
- weight_hh_l{k}
These tensors pack the weights for the 4 linear layers together for efficiency.
[W_ii | W_if | W_ig | W_io]
Pruning this tensor directly will lead to weights being misassigned when unpacked.
To ensure that each packed linear layer is pruned the same amount:
1. We split the packed weight into the 4 constituent linear parts
2. Update the mask for each individual piece using saliency individually
This applies to both weight_ih_l{k} and weight_hh_l{k}.
"""
def update_mask(self, module, tensor_name, **kwargs):
weights = getattr(module, tensor_name)
for p in getattr(module.parametrizations, tensor_name):
if isinstance(p, FakeStructuredSparsity):
mask = cast(torch.Tensor, p.mask)
# select weights based on magnitude
if weights.dim() <= 1:
raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!")
# take norm over all but first dim
dims = tuple(range(1, weights.dim()))
saliency = weights.norm(dim=dims, p=1)
# handle weights in 4 groups
split_size = len(mask) // 4
masks = torch.split(mask, split_size)
saliencies = torch.split(saliency, split_size)
for keep_mask, sal in zip(masks, saliencies):
# mask smallest k values to be removed
k = int(len(keep_mask) * kwargs["sparsity_level"])
prune = sal.topk(k, largest=False, sorted=False).indices
keep_mask.data[prune] = False # modifies underlying p.mask directly
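# Hedged illustration (added; standalone and independent of the sparsifier
# API): the same split-then-prune idea on a toy packed weight, masking the
# lowest-L1-norm rows within each of the four packed gate blocks so every
# gate is pruned by the same amount.
def _toy_packed_lstm_mask(packed_weight: torch.Tensor, sparsity_level: float) -> torch.Tensor:
    mask = torch.ones(packed_weight.size(0), dtype=torch.bool)
    dims = tuple(range(1, packed_weight.dim()))
    saliency = packed_weight.norm(dim=dims, p=1)
    split_size = len(mask) // 4
    for keep_mask, sal in zip(torch.split(mask, split_size), torch.split(saliency, split_size)):
        k = int(len(keep_mask) * sparsity_level)
        prune = sal.topk(k, largest=False, sorted=False).indices
        keep_mask[prune] = False  # the split views share storage with `mask`
    return mask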
|
4f9561a5befeafd33f198d5eff693d8dc38d1627
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/poly.py
|
c4f79b1bddde5a78d257396d6fda1d6fc6b5133f
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
poly.py
|
"""Polynomial scheduler."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import numpy as np
from otx.algorithms.segmentation.adapters.mmseg.utils.builder import SCALAR_SCHEDULERS
from .base import BaseScalarScheduler
@SCALAR_SCHEDULERS.register_module()
class PolyScalarScheduler(BaseScalarScheduler):
"""The learning rate changes over time according to a polynomial schedule.
Args:
start_scale (float): The initial learning rate scale.
end_scale (float): The final learning rate scale.
num_iters (int): The number of iterations to reach the final learning rate.
power (float): The power of the polynomial schedule.
by_epoch (bool): Whether to use epoch as the unit of iteration.
"""
def __init__(
self,
start_scale: float,
end_scale: float,
num_iters: int,
power: float = 1.2,
by_epoch: bool = False,
):
super().__init__()
self._start_s = start_scale
assert self._start_s >= 0.0
self._end_s = end_scale
assert self._end_s >= 0.0
self._num_iters = num_iters
assert self._num_iters >= 0
self._power = power
assert self._power >= 0.0
self.by_epoch = by_epoch
def _get_value(self, step, epoch_size):
if step is None:
return float(self._end_s)
if self.by_epoch:
num_iters = epoch_size * self._num_iters
else:
num_iters = self._num_iters
if num_iters == 0:
return self._end_s
if step < num_iters:
factor = (self._end_s - self._start_s) / (1.0 - self._power)
var_a = factor / (num_iters**self._power)
var_b = -factor * self._power / float(num_iters)
out_value = var_a * np.power(step, self._power) + var_b * step + self._start_s
else:
out_value = self._end_s
return out_value
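# Hedged usage sketch (added; parameter values are illustrative only): this
# quadratic schedule starts at 1.0, reaches 0.0 after 100 iterations, and
# passes through 0.25 at the halfway point.
if __name__ == '__main__':
    _sched = PolyScalarScheduler(start_scale=1.0, end_scale=0.0, num_iters=100, power=2.0)
    assert abs(_sched._get_value(step=50, epoch_size=1) - 0.25) < 1e-9
    assert _sched._get_value(step=100, epoch_size=1) == 0.0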
|
524dec4a9c0a5d6e65dbf3e1ebe73b94adc9a2c7
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_utils/pyunit_deprecations.py
|
e4870d6811a85247b3e206b178bc6e3dd626fe84
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,660
|
py
|
pyunit_deprecations.py
|
import os
import re
import sys
import warnings
from h2o.utils.metaclass import deprecated_params, deprecated_property, deprecated_fn, fullname
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils as pu
def test_deprecated_params_without_new_param():
class Foo:
@deprecated_params(dict(baz=None,
biz=None))
def __init__(self, foo=1, bar=2):
self.foo = foo
self.bar = bar
@deprecated_params(dict(operator=None))
def foobar(self, op='+'):
return eval("%s %s %s" % (self.foo, op, self.bar))
prefix = fullname(Foo.__init__)[:-8]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f = Foo()
assert f.foobar() == 3
assert len(w) == 0
f = Foo(foo=3, baz=5)
assert not hasattr(f, 'baz')
assert len(w) == 1
assert "``baz`` param of ``{}__init__`` is deprecated and will be ignored".format(prefix) in str(w[0].message)
del w[:]
assert f.foobar(operator="*") == 5
assert len(w) == 1
assert "``operator`` param of ``{}foobar`` is deprecated and will be ignored".format(prefix) in str(w[0].message)
def test_deprecated_params_with_replacement():
class Foo:
@deprecated_params(dict(Foo='foo',
Bar='bar'))
def __init__(self, foo=1, bar=2):
self.foo = foo
self.bar = bar
@deprecated_params(dict(operator='op'))
def foobar(self, op='+'):
return eval("%s %s %s" % (self.foo, op, self.bar))
prefix = fullname(Foo.__init__)[:-8]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f = Foo()
assert f.foobar() == 3
assert len(w) == 0
f = Foo(foo=3, Bar=5)
assert f.bar == 5
assert f.foobar() == 8
assert len(w) == 1
assert "``Bar`` param of ``{}__init__`` is deprecated, please use ``bar`` instead".format(prefix) in str(w[0].message)
del w[:]
assert f.foobar(operator="*") == 15
assert len(w) == 1
assert "``operator`` param of ``{}foobar`` is deprecated, please use ``op`` instead".format(prefix) in str(w[0].message)
del w[:]
f_conflict = Foo(foo=3, Foo=6)
assert f_conflict.foo == 3
assert f_conflict.foobar() == 5
assert len(w) == 2
assert "``Foo`` param of ``{}__init__`` is deprecated, please use ``foo`` instead".format(prefix) in str(w[0].message)
assert "Using both deprecated param ``Foo`` and new param(s) ``foo`` in call to ``{}__init__``, the deprecated param will be ignored.".format(prefix) in str(w[1].message)
del w[:]
f_conflict = Foo(Foo=6, foo=3) # verifying that the order is not important
assert f_conflict.foo == 3
assert f_conflict.foobar() == 5
assert len(w) == 2
assert "``Foo`` param of ``{}__init__`` is deprecated, please use ``foo`` instead".format(prefix) in str(w[0].message)
assert "Using both deprecated param ``Foo`` and new param(s) ``foo`` in call to ``{}__init__``, the deprecated param will be ignored.".format(prefix) in str(w[1].message)
del w[:]
def test_deprecated_params_message_can_be_customized():
class Foo:
@deprecated_params(dict(Foo=('foo', "Foo custom message"),
Baz=(None, "Baz custom message")))
def __init__(self, foo=1, bar=2):
self.foo = foo
self.bar = bar
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f = Foo(Foo=5, Baz=17)
assert f.foo == 5
assert f.bar == 2
assert len(w) == 2
assert str(w[0].message) == "Foo custom message"
assert str(w[1].message) == "Baz custom message"
def test_deprecated_params_advanced_syntax():
class Foo:
@deprecated_params(dict(duration_millis=(lambda millis: dict(duration=millis, unit='ms'),
"duration_millis custom message")))
def __init__(self, duration=1, unit='s'):
self.duration = duration
self.unit = unit
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f = Foo(duration_millis=600)
assert f.duration == 600
assert f.unit == 'ms'
assert len(w) == 1
assert str(w[0].message) == "duration_millis custom message"
def test_deprecated_property():
class Foo(object): # required to get it working on Py2
def __init__(self, bar=1):
self._bar = bar
@property
def bar(self):
return self._bar
@bar.setter
def bar(self, v):
self._bar = v
Bar = deprecated_property('Bar', replaced_by=bar)
Baz = deprecated_property('Baz')
Biz = deprecated_property('Biz', message="Biz custom message")
assert Foo.Bar.__doc__ == "[Deprecated] Use ``bar`` instead"
assert Foo.Baz.__doc__ == "[Deprecated] The property was removed and will be ignored."
assert Foo.Biz.__doc__ == "Biz custom message"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f = Foo(bar=5)
assert f.bar == 5
assert f.Bar == 5 # 1st warning
assert f.Baz is None # 2nd warning
assert f.Biz is None # 3rd warning
f.Bar = 7 # 4th warning
assert f.bar == 7
f.Baz = "useless" # 5th warning
assert f.Baz is None # 6th warning
assert len(w) == 6
assert str(w[0].message) == "``Bar`` is deprecated, please use ``bar`` instead."
assert str(w[1].message) == "``Baz`` is deprecated and will be ignored."
assert str(w[2].message) == "Biz custom message"
assert str(w[3].message) == str(w[0].message)
assert str(w[4].message) == str(w[1].message)
assert str(w[5].message) == str(w[1].message)
def test_deprecated_function():
def foo(bar=1):
return bar*bar
@deprecated_fn()
def fee(baz=3):
return foo(baz+2)
@deprecated_fn(replaced_by=foo)
def Foo():
pass
@deprecated_fn(replaced_by=foo, msg="custom FOO message")
def FOO():
pass
@deprecated_fn(msg="deprecated, replaced by ``foo``")
def foo_old_defaults(bar=2):
return foo(bar=bar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert fee() == 25 # 1st warning
assert Foo() == 1 # 2nd warning
assert Foo(3) == 9 # 3rd warning
assert FOO() == 1 # 4th warning
assert FOO(9) == 81 # 5th warning
assert foo_old_defaults() == 4 # 6th warning
assert foo_old_defaults(3) == 9 # 7th warning
assert len(w) == 7
assert re.match(r"``[\w.<>]*fee`` is deprecated.", str(w[0].message))
assert re.match(r"``[\w.<>]*Foo`` is deprecated, please use ``[\w.<>]*foo`` instead.", str(w[1].message))
assert str(w[3].message) == "custom FOO message"
assert str(w[5].message) == "deprecated, replaced by ``foo``"
pu.run_tests([
test_deprecated_params_without_new_param,
test_deprecated_params_with_replacement,
test_deprecated_params_message_can_be_customized,
test_deprecated_params_advanced_syntax,
test_deprecated_property,
test_deprecated_function
])
|
f8a127dc0977b7576b948fac0be15a63b20938bb
|
ce5fd5607bcf8feb39f69a1363eee6c6cce5e7e7
|
/vmesssed.py
|
5c8952213f51ec2d30711eea3907e935a9214078
|
[
"MIT"
] |
permissive
|
boypt/vmess2json
|
e04387b5d273262f0d6b62dfca4116ce368d59c3
|
ebba0e94b98fda4267a6ad3e728aa797df8abb8a
|
refs/heads/master
| 2023-03-07T23:42:42.755095
| 2022-01-13T03:16:03
| 2022-01-13T03:16:03
| 166,919,168
| 467
| 149
|
MIT
| 2023-02-24T14:45:29
| 2019-01-22T03:14:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,160
|
py
|
vmesssed.py
|
#!/usr/bin/env python3
import os
import re
import json
import base64
import argparse
import binascii
import urllib.parse  # used below for quoting/unquoting the node name in ss:// links
vmscheme = "vmess://"
ssscheme = "ss://"
def parseLink(link):
if link.startswith(ssscheme):
return parseSs(link)
elif link.startswith(vmscheme):
return parseVmess(link)
else:
print("ERROR: unsupported line: "+link)
return None
def item2link(item):
if item["net"] == "shadowsocks":
auth = base64.b64encode("{method}:{password}".format(**item).encode()).decode()
addr = "{add}:{port}".format(**item)
sslink = "ss://{}@{}#{}".format(auth, addr, urllib.parse.quote(item["ps"]))
return sslink
else:
return "vmess://{}".format(base64.b64encode(json.dumps(item).encode()).decode())
def parseSs(sslink):
if sslink.startswith(ssscheme):
ps = ""
info = sslink[len(ssscheme):]
if info.rfind("#") > 0:
info, ps = info.split("#", 2)
ps = urllib.parse.unquote(ps)
if info.find("@") < 0:
# old style link
#paddings
blen = len(info)
if blen % 4 > 0:
info += "=" * (4 - blen % 4)
info = base64.b64decode(info).decode()
atidx = info.rfind("@")
method, password = info[:atidx].split(":", 2)
addr, port = info[atidx+1:].split(":", 2)
else:
atidx = info.rfind("@")
addr, port = info[atidx+1:].split(":", 2)
info = info[:atidx]
blen = len(info)
if blen % 4 > 0:
info += "=" * (4 - blen % 4)
info = base64.b64decode(info).decode()
method, password = info.split(":", 2)
return dict(net="shadowsocks", add=addr, port=port, method=method, password=password, ps=ps)
def parseVmess(vmesslink):
if vmesslink.startswith(vmscheme):
bs = vmesslink[len(vmscheme):]
#paddings
blen = len(bs)
if blen % 4 > 0:
bs += "=" * (4 - blen % 4)
vms = base64.b64decode(bs).decode()
return json.loads(vms)
else:
raise Exception("vmess link invalid")
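# Illustrative sketch (not part of the original script): a vmess:// link is just
# base64-encoded JSON, which is what parseVmess() above relies on. The config
# values here are made-up placeholders, not a real server.
def _demo_vmess_roundtrip():
    cfg = {"ps": "example-node", "add": "example.invalid", "port": "443",
           "aid": "0", "net": "ws", "host": "", "path": "/", "tls": "tls",
           "type": "none"}
    link = vmscheme + base64.b64encode(json.dumps(cfg).encode()).decode()
    decoded = parseVmess(link)  # strips the scheme, re-pads the base64, loads the JSON
    assert decoded["ps"] == "example-node"
    return link, decoded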
def sed_loop(lines):
vmesses = []
menu_item = lambda x: "[{ps}] {add}:{port}/{net}".format(**x)
for _v in lines:
_vinfo = parseLink(_v)
if _vinfo is not None:
vmesses.append({
"menu": menu_item(_vinfo),
"link": _v,
"info": _vinfo
})
for vm in vmesses:
for key, plain in plain_amends.items():
val = vm["info"].get(key, None)
if val is None:
continue
vm["info"][key] = plain
for key, opt in sed_amends.items():
val = vm["info"].get(key, None)
if val is None:
continue
vm["info"][key] = re.sub(opt[0], opt[1], val, opt[2])
vm["link"] = item2link(vm["info"])
msg = lambda x: "{ps} / {net} / {add}:{port} / net:{net}/aid:{aid}/host:{host}/path:{path}/tls:{tls}/type:{type}".format(**x)
print(msg(vm["info"]))
if option.inplace:
output_item(vmesses)
def output_item(vmesses):
links = map(lambda x:x["link"], vmesses)
with open(option.edit[0], "w") as f:
f.write("\n".join(links)+"\n")
def parse_amendsed(val):
if not val.startswith("s"):
raise ValueError("not sed")
spliter = val[1:2]
    _, pattern, repl, tags = val.split(spliter, maxsplit=4)
return pattern, repl, tags
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="vmess subscribe file editor.")
    parser.add_argument('-m', '--amend', action='append', help="a key:sed-command pair, can be given multiple times, only the replace form of sed is supported,"
                        " example: -m 'ps:s/find/repl/i' -m 'host:s#remove##' ")
    parser.add_argument('-i', '--inplace', action='store_true', help="edit the file in place, like -i of the sed command")
parser.add_argument('edit',
nargs=1,
type=str,
help="a subscribe text file, base64 encoded or not, or a single vmess:// ss:// link")
option = parser.parse_args()
sed_amends = {}
plain_amends = {}
if option.amend:
for s in option.amend:
key, sedcmd = s.split(":", maxsplit=1)
try:
pattern, repl, tags = parse_amendsed(sedcmd)
except ValueError:
plain_amends[key] = sedcmd
continue
reflag = 0
if "i" in tags:
reflag |= re.IGNORECASE
sed_amends[key] = [pattern, repl, reflag]
arg = option.edit[0]
if os.path.exists(arg):
with open(arg) as f:
origdata = indata = f.read().strip()
try:
blen = len(indata)
if blen % 4 > 0:
indata += "=" * (4 - blen % 4)
lines = base64.b64decode(indata).decode().splitlines()
except (binascii.Error, UnicodeDecodeError):
lines = origdata.splitlines()
finally:
sed_loop(lines)
|
a1251f6f7f65bdf5a6445d831bf026be2b9cebf0
|
0ba02a4fdb83ae1243dcbb71201b1ac663891cdd
|
/omnidet/train_distance_semantic_detection_motion.py
|
cfbf6895e7fa0100d7edcf5a14c65b2268b7ec72
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
valeoai/WoodScape
|
d3b522b03846e410de36a8d3a29d74892556a0ec
|
597d9dda472c09bafea58ea69853948d63197eca
|
refs/heads/master
| 2023-08-18T07:12:58.960682
| 2022-03-23T18:03:02
| 2022-03-23T18:03:02
| 199,622,246
| 530
| 121
| null | 2023-08-26T16:16:46
| 2019-07-30T09:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,413
|
py
|
train_distance_semantic_detection_motion.py
|
"""
Distance estimation, Semantic segmentation, 2D detection and Motion segmentation training for OmniDet.
# author: Varun Ravi Kumar <rvarun7777@gmail.com>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import time
from colorama import Fore, Style
from losses.mtl_losses import UncertaintyLoss
from train_distance_semantic_detection import DistanceSemanticDetectionModelBase
from train_distance_semantic_motion import DistanceSemanticMotionModelBase
from train_semantic import SemanticModel
from train_motion import MotionModel
from train_detection import DetectionModel
class DistanceSemanticDetectionMotionModel(DistanceSemanticDetectionModelBase, DistanceSemanticMotionModelBase):
def __init__(self, args):
super().__init__(args)
self.mtl_loss = UncertaintyLoss(tasks=self.args.train).to(self.device)
self.parameters_to_train += list(self.mtl_loss.parameters())
self.configure_optimizers()
self.pre_init()
def distance_semantic_detection_motion_train(self):
"""Trainer function for distance, semantic, detection and motion prediction"""
print(f"{Fore.BLUE}=> Initial mAP for detection task: 0{Style.RESET_ALL}")
for self.epoch in range(self.args.epochs):
# switch to train mode
self.set_train()
data_loading_time = 0
gpu_time = 0
before_op_time = time.time()
for batch_idx, inputs in enumerate(self.train_loader):
data_loading_time += (time.time() - before_op_time)
before_op_time = time.time()
self.inputs_to_device(inputs)
# -- DISTANCE, SEMANTIC, OBJECT DETECTION AND MOTION SEG MODEL PREDICTIONS AND LOSS CALCULATIONS --
outputs, losses = self.distance_semantic_detection_motion_loss_predictions(inputs)
# -- MTL LOSS --
losses["mtl_loss"] = self.mtl_loss(losses)
# -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --
self.optimizer.zero_grad()
losses["mtl_loss"].mean().backward()
self.optimizer.step()
duration = time.time() - before_op_time
gpu_time += duration
if batch_idx % self.args.log_frequency == 0:
self.log_time(batch_idx, duration, losses["mtl_loss"].mean().cpu().data,
data_loading_time, gpu_time)
self.distance_statistics("train", inputs, outputs, losses)
SemanticModel.semantic_statistics(self, "train", inputs, outputs, losses)
DetectionModel.detection_statistics(self, "train")
MotionModel.motion_statistics(self, "train", inputs, outputs, losses)
data_loading_time = 0
gpu_time = 0
if self.step % self.args.val_frequency == 0 and self.step != 0:
# -- SAVE SEMANTIC MODEL WITH BEST WEIGHTS BASED ON VALIDATION IoU --
self.save_best_semantic_weights()
# -- SAVE DETECTION MODEL WITH BEST WEIGHTS BASED ON VALIDATION mAP --
self.save_best_detection_weights()
DetectionModel.detection_statistics(self, "val")
# -- SAVE MOTION MODEL WITH BEST WEIGHTS BASED ON VALIDATION IoU --
self.save_best_motion_weights()
self.step += 1
before_op_time = time.time()
self.lr_scheduler.step()
if (self.epoch + 1) % self.args.save_frequency == 0:
self.save_model()
print("Training complete!")
def distance_semantic_detection_motion_loss_predictions(self, inputs):
features, outputs, losses = self.distance_semantic_motion_loss_predictions(inputs)
        # Note: we reuse the features from a single encoder pass because all tasks share the same dataset split
detection_outputs, detection_losses = self.predict_detection(inputs, outputs, features=features)
outputs.update(detection_outputs)
losses.update(detection_losses)
return outputs, losses
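# Illustrative sketch (an assumption, not the project's actual code): UncertaintyLoss
# in losses/mtl_losses.py is used above as a learnable combiner of per-task losses.
# One common way to build such a loss is Kendall et al.'s homoscedastic-uncertainty
# weighting, sketched below with hypothetical names; the real class may differ.
import torch
import torch.nn as nn


class UncertaintyWeightingSketch(nn.Module):
    def __init__(self, tasks):
        super().__init__()
        # one learnable log(sigma^2) per task, initialised to 0 (i.e. sigma = 1)
        self.log_vars = nn.ParameterDict({task: nn.Parameter(torch.zeros(1)) for task in tasks})

    def forward(self, losses):
        total = 0.0
        for task, log_var in self.log_vars.items():
            # scale each task loss by exp(-log_var) and penalise large variances
            total = total + torch.exp(-log_var) * losses[task] + log_var
        return total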
|
cc6c667df88ddbae77fb75edf7e2e5921a219c61
|
e34b975ff93191a65a2953745fba295fe42ccb95
|
/strategyManager/strategyTest.py
|
db4d7911cc79b9063bef7753ab75d55097d7a7e2
|
[
"MIT"
] |
permissive
|
sirnfs/OptionSuite
|
1d5869563ec440c86259a473478cffd571bf76dd
|
ab45132567ce71099be4fab19ac7bce4d9dd9191
|
refs/heads/master
| 2023-07-25T21:44:47.828135
| 2023-07-10T14:45:36
| 2023-07-10T14:45:36
| 88,125,604
| 155
| 51
|
MIT
| 2023-07-10T14:45:03
| 2017-04-13T04:39:44
|
Python
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
strategyTest.py
|
import datetime
import unittest
from optionPrimitives import optionPrimitive
from strategyManager import strategy
class TestStrategyClass(unittest.TestCase):
def testStrategyClassCreation(self):
"""Tests than an exception is raised when class is instantiated."""
with self.assertRaisesRegex(TypeError, 'Cannot instantiate class.'):
strategy.Strategy(startDateTime=datetime.datetime.now(), buyOrSell=optionPrimitive.TransactionType.SELL,
underlyingTicker='SPY', orderQuantity=1)
if __name__ == '__main__':
unittest.main()
|
6a307f8689de1df68e5de5d5d12fd6ca9fdb8d47
|
916b59ec59086d9ce39cfa58e86ab6ca335a9c37
|
/src/exoplanet/orbits/simple.py
|
c4c45e3c8936f09261a82edc1915dfed3d80e327
|
[
"MIT"
] |
permissive
|
exoplanet-dev/exoplanet
|
cfff3f283180b18af1ffda517167ada0f8f8e518
|
6b0c682ded988ef967e42359fbeac572b906ee19
|
refs/heads/main
| 2023-08-31T15:39:20.816098
| 2023-08-29T11:49:28
| 2023-08-29T11:49:28
| 138,077,978
| 118
| 38
|
MIT
| 2023-09-12T05:09:25
| 2018-06-20T19:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,661
|
py
|
simple.py
|
# -*- coding: utf-8 -*-
__all__ = ["SimpleTransitOrbit"]
import aesara_theano_fallback.tensor as tt
import numpy as np
from ..utils import as_tensor_variable
class SimpleTransitOrbit:
"""An orbit representing a set of planets transiting a common central
This orbit is parameterized by the observables of a transiting system,
period, phase, duration, and impact parameter.
Args:
period: The orbital period of the planets in days.
t0: The midpoint time of a reference transit for each planet in days.
b: The impact parameters of the orbits.
duration: The durations of the transits in days.
        r_star: The radius of the star in ``R_sun``.
        ror: The planet-to-star radius ratio, used when computing the transit speed.
"""
def __init__(self, period, duration, t0=0.0, b=0.0, r_star=1.0, ror=0):
self.period = as_tensor_variable(period)
self.t0 = as_tensor_variable(t0)
self.b = as_tensor_variable(b)
self.duration = as_tensor_variable(duration)
self.r_star = as_tensor_variable(r_star)
self._b_norm = self.b * self.r_star
x2 = r_star**2 * ((1 + ror) ** 2 - b**2)
self.speed = 2 * np.sqrt(x2) / duration
self._half_period = 0.5 * self.period
self._ref_time = self.t0 - self._half_period
def get_star_position(self, t, light_delay=False):
nothing = tt.zeros_like(as_tensor_variable(t))
return nothing, nothing, nothing
def get_planet_position(self, t, light_delay=False):
return self.get_relative_position(t, light_delay=False)
def get_relative_position(self, t, light_delay=False):
"""The planets' positions relative to the star
Args:
t: The times where the position should be evaluated.
Returns:
The components of the position vector at ``t`` in units of
``R_sun``.
"""
if light_delay:
raise NotImplementedError(
"Light travel time delay is not implemented for simple orbits"
)
dt = tt.mod(tt.shape_padright(t) - self._ref_time, self.period)
dt -= self._half_period
x = tt.squeeze(self.speed * dt)
y = tt.squeeze(self._b_norm + tt.zeros_like(dt))
m = tt.abs_(dt) < 0.5 * self.duration
z = tt.squeeze(m * 1.0 - (~m) * 1.0)
return x, y, z
def get_planet_velocity(self, t):
raise NotImplementedError("a SimpleTransitOrbit has no velocity")
def get_star_velocity(self, t):
raise NotImplementedError("a SimpleTransitOrbit has no velocity")
def get_radial_velocity(self, t, output_units=None):
raise NotImplementedError("a SimpleTransitOrbit has no velocity")
def in_transit(self, t, r=None, texp=None, light_delay=False):
"""Get a list of timestamps that are in transit
Args:
t (vector): A vector of timestamps to be evaluated.
r (Optional): The radii of the planets.
texp (Optional[float]): The exposure time.
Returns:
The indices of the timestamps that are in transit.
"""
if light_delay:
raise NotImplementedError(
"Light travel time delay is not implemented for simple orbits"
)
dt = tt.mod(tt.shape_padright(t) - self._ref_time, self.period)
dt -= self._half_period
if r is None:
tol = 0.5 * self.duration
else:
x = (r + self.r_star) ** 2 - self._b_norm**2
tol = tt.sqrt(x) / self.speed
if texp is not None:
tol += 0.5 * texp
mask = tt.any(tt.abs_(dt) < tol, axis=-1)
return tt.arange(t.size)[mask]
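# Illustrative usage sketch (assumption: the aesara/theano backend evaluates these
# symbolic results eagerly via .eval(); all numbers are made up).
def _demo_simple_transit_orbit():
    import numpy as np

    orbit = SimpleTransitOrbit(period=3.5, duration=0.12, t0=1.0, b=0.2, ror=0.05)
    t = np.linspace(0.0, 10.0, 500)
    in_transit_idx = orbit.in_transit(t).eval()  # indices of timestamps inside a transit window
    x, y, z = (v.eval() for v in orbit.get_relative_position(t))  # in units of R_sun
    return in_transit_idx, x, y, z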
|
2af2b28ee88c750639f0e5ea204138fd0f936fba
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/modules/image/Image_editing/super_resolution/swinir_m_real_sr_x4/module.py
|
8c40ab71924869b2ab690419d848ac66783c5013
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,674
|
py
|
module.py
|
import argparse
import base64
import os
import time
from typing import Union
import cv2
import numpy as np
import paddle
import paddle.nn as nn
from .swinir import SwinIR
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tobytes()).decode('utf8')
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
@moduleinfo(
name='swinir_m_real_sr_x4',
version='1.0.0',
type="CV/image_editing",
author="",
author_email="",
summary="Image Restoration (Real image Super Resolution) Using Swin Transformer.",
)
class SwinIRMRealSR(nn.Layer):
def __init__(self):
super(SwinIRMRealSR, self).__init__()
self.default_pretrained_model_path = os.path.join(self.directory,
'003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pdparams')
self.swinir = SwinIR(upscale=4,
in_chans=3,
img_size=64,
window_size=8,
img_range=1.,
depths=[6, 6, 6, 6, 6, 6],
embed_dim=180,
num_heads=[6, 6, 6, 6, 6, 6],
mlp_ratio=2,
upsampler='nearest+conv',
resi_connection='1conv')
state_dict = paddle.load(self.default_pretrained_model_path)
self.swinir.set_state_dict(state_dict)
self.swinir.eval()
def preprocess(self, img: np.ndarray) -> np.ndarray:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.transpose((2, 0, 1))
img = img / 255.0
return img.astype(np.float32)
def postprocess(self, img: np.ndarray) -> np.ndarray:
img = img.clip(0, 1)
img = img * 255.0
img = img.transpose((1, 2, 0))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img.astype(np.uint8)
def real_sr(self,
image: Union[str, np.ndarray],
visualization: bool = True,
output_dir: str = "swinir_m_real_sr_x4_output") -> np.ndarray:
if isinstance(image, str):
_, file_name = os.path.split(image)
save_name, _ = os.path.splitext(file_name)
save_name = save_name + '_' + str(int(time.time())) + '.jpg'
image = cv2.imdecode(np.fromfile(image, dtype=np.uint8), cv2.IMREAD_COLOR)
elif isinstance(image, np.ndarray):
save_name = str(int(time.time())) + '.jpg'
image = image
else:
raise Exception("image should be a str / np.ndarray")
with paddle.no_grad():
img_input = self.preprocess(image)
img_input = paddle.to_tensor(img_input[None, ...], dtype=paddle.float32)
img_output = self.swinir(img_input)
img_output = img_output.numpy()[0]
img_output = self.postprocess(img_output)
if visualization:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
save_path = os.path.join(output_dir, save_name)
cv2.imwrite(save_path, img_output)
return img_output
@runnable
def run_cmd(self, argvs):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.parser.add_argument('--input_path', type=str, help="Path to image.")
self.parser.add_argument('--output_dir',
type=str,
default='swinir_m_real_sr_x4_output',
help="The directory to save output images.")
args = self.parser.parse_args(argvs)
self.real_sr(image=args.input_path, visualization=True, output_dir=args.output_dir)
return 'Results are saved in %s' % args.output_dir
@serving
def serving_method(self, image, **kwargs):
"""
Run as a service.
"""
image = base64_to_cv2(image)
img_output = self.real_sr(image=image, **kwargs)
return cv2_to_base64(img_output)
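# Illustrative usage sketch (not part of the module): the usual PaddleHub entry point
# for this model; 'input.jpg' is a placeholder path, not a bundled file.
def _demo_real_sr_usage():
    import paddlehub as hub

    sr_model = hub.Module(name='swinir_m_real_sr_x4')
    # runs x4 real-image super resolution; the result is also written under output_dir
    return sr_model.real_sr(image='input.jpg', visualization=True,
                            output_dir='swinir_m_real_sr_x4_output')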
|
18cc848b6d0b7376856782132d403c45e9f430d1
|
c168fe819b446640957e5e310ef89fcfe28662b3
|
/torchbenchmark/models/pytorch_CycleGAN_and_pix2pix/models/colorization_model.py
|
2b4a12722e52cf93b85504bbe9a078f7b396d28b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
pytorch/benchmark
|
7b55e8d714de2ea873e03df43811aab3848485dd
|
df4da9bdff11a2f948d5bd4ac83da7922e6f44f4
|
refs/heads/main
| 2023-08-29T13:06:09.671728
| 2023-08-28T16:51:55
| 2023-08-28T16:51:55
| 92,541,759
| 685
| 220
|
BSD-3-Clause
| 2023-09-14T18:10:18
| 2017-05-26T19:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
colorization_model.py
|
from .pix2pix_model import Pix2PixModel
import torch
from skimage import color # used for lab2rgb
import numpy as np
class ColorizationModel(Pix2PixModel):
"""This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
    The model training requires the '--dataset_mode colorization' dataset.
It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, we use 'colorization' dataset for this model.
See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
"""
Pix2PixModel.modify_commandline_options(parser, is_train)
parser.set_defaults(dataset_mode='colorization')
return parser
def __init__(self, opt):
"""Initialize the class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
For visualization, we set 'visual_names' as 'real_A' (input real image),
'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image)
        We convert the Lab image 'real_B' (inherited from Pix2pixModel) to an RGB image 'real_B_rgb'.
        We convert the Lab image 'fake_B' (inherited from Pix2pixModel) to an RGB image 'fake_B_rgb'.
"""
# reuse the pix2pix model
Pix2PixModel.__init__(self, opt)
# specify the images to be visualized.
self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
def lab2rgb(self, L, AB):
"""Convert an Lab tensor image to a RGB numpy output
Parameters:
L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
Returns:
rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
"""
AB2 = AB * 110.0
L2 = (L + 1.0) * 50.0
Lab = torch.cat([L2, AB2], dim=1)
Lab = Lab[0].data.cpu().float().numpy()
Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
rgb = color.lab2rgb(Lab) * 255
return rgb
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
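# Illustrative sketch (not part of the model): a quick check of the rescaling done in
# lab2rgb() above -- network outputs in [-1, 1] are mapped back to Lab ranges,
# L to [0, 100] and ab to [-110, 110], before skimage converts them to RGB.
def _demo_lab_rescaling():
    L = torch.tensor([-1.0, 0.0, 1.0])   # network-range L channel samples
    AB = torch.tensor([-1.0, 0.0, 1.0])  # network-range a/b channel samples
    L_lab = (L + 1.0) * 50.0             # -> 0, 50, 100
    AB_lab = AB * 110.0                  # -> -110, 0, 110
    return L_lab, AB_lab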
|
44e33346c15df782c1cff5c1e213f95adfcb3891
|
90446a6169fc6e0eee42e50127d84140c45024f3
|
/examples/vae.py
|
5696f62e39d79f28a7148b587314df857c6a36b5
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/funsor
|
ab13a1fa6c9931143302daa7d3150db308797fdb
|
ff5e410de720bc1f423f57cf156592fc194b058c
|
refs/heads/master
| 2023-08-31T17:16:00.126501
| 2023-02-06T22:03:09
| 2023-02-06T22:03:09
| 168,430,006
| 230
| 23
|
Apache-2.0
| 2023-08-31T18:34:44
| 2019-01-30T23:13:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,968
|
py
|
vae.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: VAE MNIST
==================
"""
import argparse
import os
import typing
from collections import OrderedDict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.datasets import MNIST
import funsor
import funsor.ops as ops
import funsor.torch.distributions as dist
from funsor.domains import Bint, Reals
REPO_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_PATH = os.path.join(REPO_PATH, "data")
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
def forward(self, image: Reals[28, 28]) -> typing.Tuple[Reals[20], Reals[20]]:
image = image.reshape(image.shape[:-2] + (-1,))
h1 = F.relu(self.fc1(image))
loc = self.fc21(h1)
scale = self.fc22(h1).exp()
return loc, scale
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def forward(self, z: Reals[20]) -> Reals[28, 28]:
h3 = F.relu(self.fc3(z))
out = torch.sigmoid(self.fc4(h3))
return out.reshape(out.shape[:-1] + (28, 28))
def main(args):
funsor.set_backend("torch")
# XXX Temporary fix after https://github.com/pyro-ppl/pyro/pull/2701
import pyro
pyro.enable_validation(False)
encoder = Encoder()
decoder = Decoder()
# These rely on type hints on the .forward() methods.
encode = funsor.function(encoder)
decode = funsor.function(decoder)
@funsor.montecarlo.MonteCarlo()
def loss_function(data, subsample_scale):
# Lazily sample from the guide.
loc, scale = encode(data)
q = funsor.Independent(
dist.Normal(loc["i"], scale["i"], value="z_i"), "z", "i", "z_i"
)
# Evaluate the model likelihood at the lazy value z.
probs = decode("z")
p = dist.Bernoulli(probs["x", "y"], value=data["x", "y"])
p = p.reduce(ops.add, {"x", "y"})
# Construct an elbo. This is where sampling happens.
elbo = funsor.Integrate(q, p - q, "z")
elbo = elbo.reduce(ops.add, "batch") * subsample_scale
loss = -elbo
return loss
train_loader = torch.utils.data.DataLoader(
MNIST(DATA_PATH, train=True, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size,
shuffle=True,
)
encoder.train()
decoder.train()
optimizer = optim.Adam(
list(encoder.parameters()) + list(decoder.parameters()), lr=1e-3
)
for epoch in range(args.num_epochs):
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
subsample_scale = float(len(train_loader.dataset) / len(data))
data = data[:, 0, :, :]
data = funsor.Tensor(data, OrderedDict(batch=Bint[len(data)]))
optimizer.zero_grad()
loss = loss_function(data, subsample_scale)
assert isinstance(loss, funsor.Tensor), loss.pretty()
loss.data.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % 50 == 0:
print(f" loss = {loss.item()}")
if batch_idx and args.smoke_test:
return
print(f"epoch {epoch} train_loss = {train_loss}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="VAE MNIST Example")
parser.add_argument("-n", "--num-epochs", type=int, default=10)
parser.add_argument("--batch-size", type=int, default=8)
parser.add_argument("--smoke-test", action="store_true")
args = parser.parse_args()
main(args)
|
6476ca77c2d042c5f18f4051aee9fd2d9e24a3e0
|
2d5a3cde8291c1f733f63b83e3d02f77321a9f12
|
/python/django/django-realworld/django-realworld-example-app/conduit/apps/authentication/views.py
|
b12d8674448c200f954d7bd1252324832a395080
|
[
"BSD-3-Clause"
] |
permissive
|
DataDog/trace-examples
|
99d5e6e0984beefb08a2a3ead0dc35e19798d932
|
121636bbae446fb93f56c14a83ba819faf327d1f
|
refs/heads/master
| 2023-08-19T00:30:52.632661
| 2023-03-23T15:14:13
| 2023-03-23T15:14:13
| 61,754,713
| 106
| 73
|
BSD-3-Clause
| 2023-03-08T14:06:45
| 2016-06-22T22:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,145
|
py
|
views.py
|
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .renderers import UserJSONRenderer
from .serializers import (
LoginSerializer, RegistrationSerializer, UserSerializer
)
class RegistrationAPIView(APIView):
# Allow any user (authenticated or not) to hit this endpoint.
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = RegistrationSerializer
def post(self, request):
user = request.data.get('user', {})
# The create serializer, validate serializer, save serializer pattern
# below is common and you will see it a lot throughout this course and
# your own work later on. Get familiar with it.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
class LoginAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = LoginSerializer
def post(self, request):
user = request.data.get('user', {})
# Notice here that we do not call `serializer.save()` like we did for
# the registration endpoint. This is because we don't actually have
# anything to save. Instead, the `validate` method on our serializer
# handles everything we need.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class UserRetrieveUpdateAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (UserJSONRenderer,)
serializer_class = UserSerializer
def retrieve(self, request, *args, **kwargs):
# There is nothing to validate or save here. Instead, we just want the
# serializer to handle turning our `User` object into something that
# can be JSONified and sent to the client.
serializer = self.serializer_class(request.user)
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
user_data = request.data.get('user', {})
serializer_data = {
'username': user_data.get('username', request.user.username),
'email': user_data.get('email', request.user.email),
'profile': {
'bio': user_data.get('bio', request.user.profile.bio),
'image': user_data.get('image', request.user.profile.image)
}
}
# Here is that serialize, validate, save pattern we talked about
# before.
serializer = self.serializer_class(
request.user, data=serializer_data, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
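# Illustrative sketch (not part of the app): the request bodies these views expect --
# everything nested under a top-level "user" key. Field names follow the RealWorld
# API convention and are assumptions; the serializers define the authoritative set.
EXAMPLE_REGISTRATION_PAYLOAD = {
    'user': {
        'username': 'jake',
        'email': 'jake@example.com',
        'password': 'hunter2',
    }
}
EXAMPLE_UPDATE_PAYLOAD = {
    'user': {
        'email': 'jake@example.com',
        'bio': 'I like to skateboard',
        'image': 'https://example.com/jake.jpg',
    }
}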
|
9dcac38d251f1a0da64449f133427acffaf5d967
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/ddtrace/contrib/asyncio/wrappers.py
|
ddbabc4ecac7d1a46db0a1701ced5af07b6720b3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 953
|
py
|
wrappers.py
|
from .compat import asyncio_current_task
from .provider import AsyncioContextProvider
def wrapped_create_task(wrapped, instance, args, kwargs):
"""Wrapper for ``create_task(coro)`` that propagates the current active
``Context`` to the new ``Task``. This function is useful to connect traces
of detached executions.
Note: we can't just link the task contexts due to the following scenario:
* begin task A
* task A starts task B1..B10
* finish task B1-B9 (B10 still on trace stack)
* task A starts task C
* now task C gets parented to task B10 since it's still on the stack,
however was not actually triggered by B10
"""
new_task = wrapped(*args, **kwargs)
current_task = asyncio_current_task()
ctx = getattr(current_task, AsyncioContextProvider._CONTEXT_ATTR, None)
if ctx:
setattr(new_task, AsyncioContextProvider._CONTEXT_ATTR, ctx)
return new_task
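# Illustrative sketch (an assumption, not ddtrace's actual patch code):
# wrapped_create_task follows the wrapt wrapper signature, so applying it to an
# event loop's create_task would look roughly like this; the real integration may
# pick a different patch point.
def _demo_patch_create_task():
    import asyncio

    import wrapt

    wrapt.wrap_function_wrapper(asyncio.BaseEventLoop, "create_task", wrapped_create_task)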
|
f94b94fd2b90ccc15877a3c5e84b40c5a7be9312
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Tekla/Structures/ModelInternal_parts/dotAssembly_t.py
|
3b8806feb46b641253987a07ed655e29e52f2097
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
py
|
dotAssembly_t.py
|
class dotAssembly_t(object):
# no doc
aAssemblyOtherPartsIds=None
aAssemblySubAssemblyIds=None
aName=None
AssemblableId=None
MainAssembly=None
MainPart=None
ModelObject=None
nAssemblyOtherParts=None
nAssemblySubAssemblies=None
NumberingSeries=None
Type=None
|
9885012256f548a1a872c2671dee9801fead46ac
|
ca93302d06425c04492e92b801277496ecde68d3
|
/redis/commands/graph/execution_plan.py
|
179a80cca02732ca956c9051cf89889d8ff49704
|
[
"MIT"
] |
permissive
|
redis/redis-py
|
2c34d4511e31033ed85b70437454ff5c0c2c67d1
|
e3de026a90ef2cc35a5b68934029a0ef2a5b2f53
|
refs/heads/master
| 2023-09-04T09:53:01.713856
| 2023-08-31T09:26:48
| 2023-08-31T09:26:48
| 363,150
| 2,213
| 513
|
MIT
| 2023-09-14T14:05:30
| 2009-11-06T10:22:26
|
Python
|
UTF-8
|
Python
| false
| false
| 6,742
|
py
|
execution_plan.py
|
import re
class ProfileStats:
"""
ProfileStats, runtime execution statistics of operation.
"""
def __init__(self, records_produced, execution_time):
self.records_produced = records_produced
self.execution_time = execution_time
class Operation:
"""
Operation, single operation within execution plan.
"""
def __init__(self, name, args=None, profile_stats=None):
"""
Create a new operation.
Args:
name: string that represents the name of the operation
args: operation arguments
profile_stats: profile statistics
"""
self.name = name
self.args = args
self.profile_stats = profile_stats
self.children = []
def append_child(self, child):
if not isinstance(child, Operation) or self is child:
raise Exception("child must be Operation")
self.children.append(child)
return self
def child_count(self):
return len(self.children)
def __eq__(self, o: object) -> bool:
if not isinstance(o, Operation):
return False
return self.name == o.name and self.args == o.args
def __str__(self) -> str:
args_str = "" if self.args is None else " | " + self.args
return f"{self.name}{args_str}"
class ExecutionPlan:
"""
ExecutionPlan, collection of operations.
"""
def __init__(self, plan):
"""
Create a new execution plan.
Args:
            plan: array of strings that represents the collection of operations,
                i.e. the output from GRAPH.EXPLAIN
"""
if not isinstance(plan, list):
raise Exception("plan must be an array")
if isinstance(plan[0], bytes):
plan = [b.decode() for b in plan]
self.plan = plan
self.structured_plan = self._operation_tree()
def _compare_operations(self, root_a, root_b):
"""
Compare execution plan operation tree
Return: True if operation trees are equal, False otherwise
"""
# compare current root
if root_a != root_b:
return False
        # make sure both roots have the same number of children
if root_a.child_count() != root_b.child_count():
return False
# recursively compare children
for i in range(root_a.child_count()):
if not self._compare_operations(root_a.children[i], root_b.children[i]):
return False
return True
def __str__(self) -> str:
def aggraget_str(str_children):
return "\n".join(
[
" " + line
for str_child in str_children
for line in str_child.splitlines()
]
)
def combine_str(x, y):
return f"{x}\n{y}"
return self._operation_traverse(
self.structured_plan, str, aggraget_str, combine_str
)
def __eq__(self, o: object) -> bool:
"""Compares two execution plans
Return: True if the two plans are equal False otherwise
"""
# make sure 'o' is an execution-plan
if not isinstance(o, ExecutionPlan):
return False
# get root for both plans
root_a = self.structured_plan
root_b = o.structured_plan
# compare execution trees
return self._compare_operations(root_a, root_b)
def _operation_traverse(self, op, op_f, aggregate_f, combine_f):
"""
Traverse operation tree recursively applying functions
Args:
op: operation to traverse
op_f: function applied for each operation
aggregate_f: aggregation function applied for all children of a single operation
combine_f: combine function applied for the operation result and the children result
""" # noqa
# apply op_f for each operation
op_res = op_f(op)
if len(op.children) == 0:
return op_res # no children return
else:
# apply _operation_traverse recursively
children = [
self._operation_traverse(child, op_f, aggregate_f, combine_f)
for child in op.children
]
# combine the operation result with the children aggregated result
return combine_f(op_res, aggregate_f(children))
def _operation_tree(self):
"""Build the operation tree from the string representation"""
# initial state
i = 0
level = 0
stack = []
current = None
def _create_operation(args):
profile_stats = None
name = args[0].strip()
args.pop(0)
if len(args) > 0 and "Records produced" in args[-1]:
records_produced = int(
re.search("Records produced: (\\d+)", args[-1]).group(1)
)
execution_time = float(
re.search("Execution time: (\\d+.\\d+) ms", args[-1]).group(1)
)
profile_stats = ProfileStats(records_produced, execution_time)
args.pop(-1)
return Operation(
name, None if len(args) == 0 else args[0].strip(), profile_stats
)
# iterate plan operations
while i < len(self.plan):
current_op = self.plan[i]
op_level = current_op.count(" ")
if op_level == level:
# if the operation level equal to the current level
# set the current operation and move next
child = _create_operation(current_op.split("|"))
if current:
current = stack.pop()
current.append_child(child)
current = child
i += 1
elif op_level == level + 1:
# if the operation is child of the current operation
# add it as child and set as current operation
child = _create_operation(current_op.split("|"))
current.append_child(child)
stack.append(current)
current = child
level += 1
i += 1
elif op_level < level:
# if the operation is not child of current operation
# go back to it's parent operation
levels_back = level - op_level + 1
for _ in range(levels_back):
current = stack.pop()
level -= levels_back
else:
raise Exception("corrupted plan")
return stack[0]
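# Illustrative sketch (not part of the client): how an ExecutionPlan is built from
# GRAPH.EXPLAIN-style output. Each extra level of nesting is one more 4-space indent,
# which is what _operation_tree() keys on. The plan below is made up.
def _demo_execution_plan():
    plan = [
        "Results",
        "    Project",
        "        Conditional Traverse | (a)->(b)",
        "            Node By Label Scan | (a:Person)",
    ]
    ep = ExecutionPlan(plan)
    print(ep)  # re-renders the parsed operation tree with the same nesting
    return ep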
|
b9a438582aa90fe76e70413a58050645d45f56fc
|
3d860de237a8e8b40da6594148bee7b2269af35a
|
/parse.py
|
3c6ae2e8e8447bc992bc29e6a3ed3e10f6ed7626
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
distributed-system-analysis/smallfile
|
16a02bf2f2bba39c825c5d5fe2882804a41afeaf
|
aa2519de5bf0b3b383db2c31eacbf8ac4328e19c
|
refs/heads/main
| 2023-05-24T21:24:12.116057
| 2023-05-08T21:59:34
| 2023-05-17T11:20:57
| 4,494,878
| 109
| 36
|
Apache-2.0
| 2023-05-23T20:09:42
| 2012-05-30T13:12:38
|
Python
|
UTF-8
|
Python
| false
| false
| 12,262
|
py
|
parse.py
|
# -*- coding: utf-8 -*-
"""
parse.py -- parses CLI commands for smallfile_cli.py
Copyright 2012 -- Ben England
Licensed under the Apache License at http://www.apache.org/licenses/LICENSE-2.0
See Appendix on this page for instructions pertaining to license.
"""
import argparse
import os
import smallfile
import smf_test_params
from parser_data_types import (
SmfParseException,
boolean,
directory_list,
file_size_distrib,
host_set,
non_negative_integer,
positive_integer,
)
from smallfile import SmallfileWorkload
yaml_parser_installed = False
try:
import yaml_parser
yaml_parser_installed = True
except ImportError:
pass
# parse command line
# return smf_test_params.smf_test_params instance
# defining all test parameters.
# default does short test in /var/tmp so you can see the program run
def parse():
# store as much as you can in SmallfileWorkload object
# so per-thread invocations inherit
test_params = smf_test_params.smf_test_params()
inv = test_params.master_invoke # for convenience
parser = argparse.ArgumentParser(description="parse smallfile CLI parameters")
add = parser.add_argument
add("--yaml-input-file", help="input YAML file containing all parameters below")
add(
"--output-json",
default=test_params.output_json,
help="if true then output JSON-format version of results",
)
add(
"--response-times",
type=boolean,
default=inv.measure_rsptimes,
help="if true then record response time of each file op",
)
add(
"--network-sync-dir",
help="if --top not shared filesystem, provide shared filesystem directory",
)
add(
"--operation",
default="cleanup",
choices=SmallfileWorkload.all_op_names,
help="type of operation to perform on each file",
)
add(
"--top",
type=directory_list,
default=inv.top_dirs,
help="top directory or directories used by smallfile",
)
add(
"--host-set",
type=host_set,
default=test_params.host_set,
help="list of workload generator hosts (or file containing it) ",
)
add(
"--launch-by-daemon",
type=boolean,
default=test_params.launch_by_daemon,
help="use non-ssh launcher to get test running",
)
add(
"--files",
type=positive_integer,
default=inv.iterations,
help="files processed per thread",
)
add(
"--threads",
type=positive_integer,
default=test_params.thread_count,
help="threads per client",
)
add(
"--files-per-dir",
type=positive_integer,
default=inv.files_per_dir,
help="files per (sub)directory",
)
add(
"--dirs-per-dir",
type=positive_integer,
default=inv.dirs_per_dir,
help="subdirectories per directory",
)
add(
"--record-size",
type=positive_integer,
default=inv.record_sz_kb,
help="record size (KB)",
)
add(
"--file-size",
type=non_negative_integer,
default=inv.total_sz_kb,
help="subdirectories per directory",
)
add(
"--file-size-distribution",
type=file_size_distrib,
default=inv.filesize_distr,
help='file size can be constant ("fixed") or random ("exponential")',
)
add(
"--fsync",
type=boolean,
default=inv.fsync,
help="call fsync() after each file is written/modified",
)
add(
"--xattr-size",
type=non_negative_integer,
default=inv.xattr_size,
help="extended attribute size (bytes)",
)
add(
"--xattr-count",
type=non_negative_integer,
default=inv.xattr_count,
help="number of extended attributes per file",
)
add(
"--pause",
type=non_negative_integer,
default=inv.pause_between_files,
help="pause between each file (microsec)",
)
add(
"--auto-pause",
type=boolean,
default=inv.auto_pause,
help="adjust pause between files automatically based on response times",
)
add(
"--cleanup-delay-usec-per-file",
type=non_negative_integer,
default=inv.cleanup_delay_usec_per_file,
help="time to delay after cleanup per file (microsec)",
)
add(
"--stonewall",
type=boolean,
default=inv.stonewall,
help="stop measuring as soon as first thread is done",
)
add(
"--finish",
type=boolean,
default=inv.finish_all_rq,
help="stop processing files as soon as first thread is done",
)
add("--prefix", default=inv.prefix, help="filename prefix")
add("--suffix", default=inv.suffix, help="filename suffix")
add(
"--hash-into-dirs",
type=boolean,
default=inv.hash_to_dir,
help="if true then pseudo-randomly place files into directories",
)
add(
"--same-dir",
type=boolean,
default=inv.is_shared_dir,
help="if true then all threads share the same directories",
)
add(
"--verbose",
type=boolean,
default=inv.verbose,
help="if true then log extra messages about test",
)
add(
"--permute-host-dirs",
type=boolean,
default=test_params.permute_host_dirs,
help="if true then shift clients to different host directories",
)
add(
"--record-ctime-size",
type=boolean,
default=inv.record_ctime_size,
help="if true then update file xattr with ctime+size",
)
add(
"--verify-read",
type=boolean,
default=inv.verify_read,
help="if true then check that data read = data written",
)
add(
"--incompressible",
type=boolean,
default=inv.incompressible,
help="if true then non-compressible data written",
)
# these parameters shouldn't be used by mere mortals
add(
"--min-dirs-per-sec",
type=positive_integer,
default=test_params.min_directories_per_sec,
help=argparse.SUPPRESS,
)
add(
"--log-to-stderr",
type=boolean,
default=inv.log_to_stderr,
help=argparse.SUPPRESS,
)
add("--remote-pgm-dir", default=test_params.remote_pgm_dir, help=argparse.SUPPRESS)
add("--slave", help=argparse.SUPPRESS)
add("--as-host", help=argparse.SUPPRESS)
add(
"--host-count",
type=positive_integer,
default=0,
help="total number of hosts/pods participating in smallfile test",
)
args = parser.parse_args()
inv.opname = args.operation
test_params.top_dirs = [os.path.abspath(p) for p in args.top]
test_params.launch_by_daemon = args.launch_by_daemon
inv.iterations = args.files
test_params.thread_count = inv.threads = args.threads
inv.files_per_dir = args.files_per_dir
inv.dirs_per_dir = args.dirs_per_dir
inv.record_sz_kb = args.record_size
inv.total_sz_kb = args.file_size
test_params.size_distribution = inv.filesize_distr = args.file_size_distribution
inv.xattr_size = args.xattr_size
inv.xattr_count = args.xattr_count
inv.prefix = args.prefix
inv.suffix = args.suffix
inv.hash_to_dir = args.hash_into_dirs
inv.pause_between_files = args.pause
inv.auto_pause = args.auto_pause
test_params.cleanup_delay_usec_per_file = (
inv.cleanup_delay_usec_per_file
) = args.cleanup_delay_usec_per_file
inv.stonewall = args.stonewall
inv.finish_all_rq = args.finish
inv.measure_rsptimes = args.response_times
inv.fsync = args.fsync
inv.record_ctime_size = args.record_ctime_size
test_params.permute_host_dirs = args.permute_host_dirs
test_params.output_json = args.output_json
inv.incompressible = args.incompressible
inv.verify_read = args.verify_read
test_params.min_directories_per_sec = args.min_dirs_per_sec
inv.is_shared_dir = args.same_dir
inv.verbose = args.verbose
inv.log_to_stderr = args.log_to_stderr
test_params.remote_pgm_dir = args.remote_pgm_dir
test_params.network_sync_dir = args.network_sync_dir
test_params.is_slave = args.slave
inv.onhost = smallfile.get_hostname(args.as_host)
test_params.host_set = args.host_set
inv.total_hosts = args.host_count
# if YAML input was used, update test_params object with this
# YAML parameters override CLI parameters
if args.yaml_input_file:
if not yaml_parser_installed:
raise SmfParseException("python yaml module not available - is this PyPy?")
yaml_parser.parse_yaml(test_params, args.yaml_input_file)
# total_hosts is a parameter that allows pod workloads to know
# how many other pods are doing the same thing
if inv.total_hosts == 0:
if test_params.host_set is not None:
inv.total_hosts = len(test_params.host_set)
else:
inv.total_hosts = 1
# network_sync_dir is where python processes share state
if not test_params.network_sync_dir:
test_params.network_sync_dir = os.path.join(
test_params.top_dirs[0], "network_shared"
)
# validate parameters further now that we know what they all are
sdmsg = "directory %s containing network sync dir. must exist on all hosts (including this one)"
parentdir = os.path.dirname(test_params.network_sync_dir)
if not os.path.isdir(parentdir) and args.host_set is not None:
raise SmfParseException(sdmsg % parentdir)
if inv.record_sz_kb > inv.total_sz_kb and inv.total_sz_kb != 0:
raise SmfParseException("record size cannot exceed file size")
if inv.record_sz_kb == 0 and inv.verbose:
print(
"record size not specified,large files will default to record size %d KB"
% (SmallfileWorkload.biggest_buf_size / inv.BYTES_PER_KB)
)
if test_params.top_dirs:
for d in test_params.top_dirs:
if len(d) < 6:
raise SmfParseException(
"directory less than 6 characters, cannot use top of filesystem, too dangerous"
)
if not os.path.isdir(d) and test_params.network_sync_dir is not None:
raise SmfParseException(
"you must ensure that shared directory "
+ d
+ " is accessible from this host and every remote host in test"
)
if test_params.top_dirs:
inv.set_top(test_params.top_dirs)
else:
test_params.top_dirs = inv.top_dirs
if test_params.network_sync_dir:
inv.network_dir = test_params.network_sync_dir
else:
test_params.network_sync_dir = inv.network_dir
inv.starting_gate = os.path.join(inv.network_dir, "starting_gate.tmp")
if inv.iterations < 10:
inv.stonewall = False
if inv.opname == "cleanup" and (inv.auto_pause or (inv.pause_between_files > 0)):
inv.auto_pause = False
inv.pause_between_files = 0
print("do not need pause between files during cleanup")
if inv.total_hosts * inv.threads == 1:
inv.auto_pause = False
inv.pause_between_files = 0
print("do not need pause between files for single-threaded workload")
if inv.auto_pause and inv.pause_between_files > 0:
inv.pause_between_files = 0
print("pause parameter not needed with auto-pause Y, setting pause to 0")
# create must finish all files so that subsequent ops have the files they need
# cleanup must finish all files so that all remnants of last test are removed
if (
["cleanup", "create", "mkdir"].__contains__(inv.opname)
) and not inv.finish_all_rq:
print("changing --finish to true for op type %s" % inv.opname)
inv.finish_all_rq = True
if not test_params.is_slave:
prm_list = test_params.human_readable()
for prm_name, prm_value in prm_list:
print("%40s : %s" % (prm_name, prm_value))
inv.reset()
test_params.recalculate_timeouts()
return test_params
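# Illustrative sketch (not part of the parser): a typical argument vector that parse()
# above accepts; the --top directory is a placeholder and must already exist, and the
# same parameters could instead be supplied via --yaml-input-file (YAML overrides CLI).
def _demo_parse_cli():
    import sys

    sys.argv = [
        "smallfile_cli.py",
        "--operation", "create",
        "--threads", "8",
        "--files", "4096",
        "--file-size", "64",
        "--top", "/var/tmp/smf",
    ]
    return parse()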
|
a5e60e2bdee03a7fc771d0be637878427a2aad57
|
095e5e86c931af6553996b0a128c07d94b38cbca
|
/microbench/conftest.py
|
8e553ffe76185d17bd71edb2512da4acf1bd7b8f
|
[
"MIT"
] |
permissive
|
hpyproject/hpy
|
1dc9e5e855fa006b1728703c5925addbb43cf792
|
8310a762d78e3412464b1869959a77da013e6307
|
refs/heads/master
| 2023-09-03T21:18:17.273371
| 2023-07-24T07:26:14
| 2023-07-24T07:26:14
| 196,559,763
| 681
| 41
|
MIT
| 2023-07-24T07:26:16
| 2019-07-12T10:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
conftest.py
|
import re
import time
from collections import defaultdict
import pytest
import _valgrind
class Timer:
def __init__(self, nodeid):
self.nodeid = nodeid
self.start = None
self.stop = None
def __enter__(self):
if self.start is not None:
raise ValueError('You cannot use "with timer:" more than once')
_valgrind.lib.callgrind_start()
self.start = time.time()
def __exit__(self, etype, evalue, tb):
self.stop = time.time()
_valgrind.lib.callgrind_stop()
def __str__(self):
if self.start is None:
return '[NO TIMING]'
if self.stop is None:
return '[IN-PROGRESS]'
usec = (self.stop - self.start) * 1000
return f'{usec:.2f} us'
@property
def elapsed(self):
if self.start is not None and self.stop is not None:
return self.stop - self.start
return None
class TimerSession:
NODEID = re.compile(r'(.*)\[(.*)\]')
def __init__(self):
self.apis = set() # ['cpy', 'hpy', ...]
self.table = defaultdict(dict) # {shortid: {api: timer}}
self.timers = {} # nodeid -> Timer
def new_timer(self, nodeid):
shortid, api = self.split_nodeid(nodeid)
timer = Timer(nodeid)
self.apis.add(api)
self.table[shortid][api] = timer
self.timers[nodeid] = timer
return timer
def get_timer(self, nodeid):
return self.timers.get(nodeid)
def split_nodeid(self, nodeid):
shortid = '::'.join(nodeid.split('::')[-2:]) # take only class::function
m = self.NODEID.match(shortid)
if not m:
return shortid, ''
return m.group(1), m.group(2)
def format_ratio(self, reference, value):
if reference and reference.elapsed and value and value.elapsed:
ratio = value.elapsed / reference.elapsed
return f'[{ratio:.2f}]'
return ''
def display_summary(self, tr):
w = tr.write_line
w('')
tr.write_sep('=', 'BENCHMARKS', cyan=True)
w(' '*40 + ' cpy hpy')
w(' '*40 + '---------------- -------------------')
for shortid, timings in self.table.items():
cpy = timings.get('cpy')
hpy = timings.get('hpy')
hpy_ratio = self.format_ratio(cpy, hpy)
cpy = cpy or ''
hpy = hpy or ''
w(f'{shortid:<40} {cpy!s:>15} {hpy!s:>15} {hpy_ratio}')
w('')
@pytest.fixture
def timer(request, api):
nodeid = request.node.nodeid
return request.config._timersession.new_timer(nodeid)
def pytest_configure(config):
config._timersession = TimerSession()
config.addinivalue_line("markers", "hpy: mark modules using the HPy API")
config.addinivalue_line("markers", "cpy: mark modules using the old Python/C API")
def pytest_addoption(parser):
parser.addoption(
"--fast", action="store_true", default=False, help="run microbench faster"
)
parser.addoption(
"--slow", action="store_true", default=False, help="run microbench slower"
)
VERBOSE_TEST_NAME_LENGTH = 90
@pytest.hookimpl(hookwrapper=True)
def pytest_report_teststatus(report, config):
outcome = yield
category, letter, word = outcome.get_result()
timer = config._timersession.get_timer(report.nodeid)
if category == 'passed' and timer:
L = VERBOSE_TEST_NAME_LENGTH - len(report.nodeid)
word = str(timer).rjust(L)
markup = None
if timer.elapsed is None:
markup = {'yellow': True}
outcome.force_result((category, letter, (word, markup)))
def pytest_terminal_summary(terminalreporter, config):
config._timersession.display_summary(terminalreporter)
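# Illustrative sketch (not part of the plugin): how a microbenchmark would use the
# `timer` fixture above -- the timed region goes inside `with timer:` and its elapsed
# time shows up in the BENCHMARKS summary. The `api` fixture it depends on is assumed
# to be defined elsewhere in the suite (parametrised over 'cpy' and 'hpy').
def test_example_noop(timer):
    with timer:
        sum(range(1000))  # the code being measured; a real test calls the module under test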
|
9a5aeba9ce5f7d3f3f3a492567ef8bab9ca0d353
|
76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6
|
/tfx/dsl/components/common/importer.py
|
48104e0d04d8e2c855ba6647091c0caaadd5479b
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tfx
|
0cfc9c55171352ecc98c9dfa8ffe976c689d7073
|
1b328504fa08a70388691e4072df76f143631325
|
refs/heads/master
| 2023-08-30T11:56:50.894497
| 2023-08-29T22:47:19
| 2023-08-29T22:48:26
| 169,116,405
| 2,116
| 899
|
Apache-2.0
| 2023-09-14T21:51:42
| 2019-02-04T17:14:36
|
Python
|
UTF-8
|
Python
| false
| false
| 13,077
|
py
|
importer.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
# Constant to access Importer importing result from Importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access output key from Importer exec_properties dict.
OUTPUT_KEY_KEY = 'output_key'
# Constant to access artifact uri from Importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from Importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
properties: Optional[Dict[str, Any]],
custom_properties: Optional[Dict[str, Any]]):
"""Sets properties and custom_properties to the given artifact."""
if properties is not None:
for key, value in properties.items():
setattr(artifact, key, value)
if custom_properties is not None:
for key, value in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(
f'Unexpected custom_property value type:{type(value)}')
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI, properties /
custom properties, and type, that artifact will be reused unless the
`reimport` argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, str, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(uri)
new_artifact_type = False
if mlmd_artifact_type and not mlmd_artifact_type.id:
try:
mlmd_artifact_type = metadata_handler.store.get_artifact_type(
mlmd_artifact_type.name)
except errors.NotFoundError:
# Artifact type is not registered, so it must be new.
new_artifact_type = True
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
result.is_external = True
_set_artifact_properties(result, properties, custom_properties)
# Only consider previous artifacts as candidates to reuse if:
# * reimport is False
# * the given artifact type is recognized by MLMD
# * the type and properties match the imported artifact
if not reimport and not new_artifact_type:
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
if mlmd_artifact_type and candidate_mlmd_artifact.type_id != mlmd_artifact_type.id:
# If mlmd_artifact_type is defined, don't reuse existing artifacts if
# they don't match the given type.
continue
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if (
not candidate_artifact.has_property(key)
or getattr(candidate_artifact, key) != value
):
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if (
not candidate_artifact.has_custom_property(key)
or candidate_artifact.get_int_custom_property(key) != value
):
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if (
not candidate_artifact.has_custom_property(key)
or candidate_artifact.get_string_custom_property(key) != value
):
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if previous_artifacts:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(
max(previous_artifacts, key=lambda m: m.create_time_since_epoch))
return result
def generate_output_dict(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool,
output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None,
output_key: Optional[str] = None,
) -> Dict[str, List[types.Artifact]]:
"""Generates Importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
output_key: The key to use for the imported artifact in the Importer's
output dictionary. Defaults to 'result'.
Returns:
A dictionary with the only key `output_key` whose value is the Artifact.
"""
output_key = output_key or IMPORT_RESULT_KEY
return {
output_key: [
_prepare_artifact(
metadata_handler,
uri=uri,
properties=properties,
custom_properties=custom_properties,
output_artifact_class=output_artifact_class,
mlmd_artifact_type=mlmd_artifact_type,
reimport=reimport)
]
}
class ImporterDriver(base_driver.BaseDriver):
"""Driver for Importer."""
def pre_execution(
self,
input_dict: Dict[str, types.BaseChannel],
output_dict: Dict[str, types.Channel],
exec_properties: Dict[str, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution.
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Create imported artifacts.
output_key = exec_properties[OUTPUT_KEY_KEY]
output_channel = output_dict[output_key]
output_artifacts = generate_output_dict(
self._metadata_handler,
uri=exec_properties[SOURCE_URI_KEY],
properties=output_channel.additional_properties,
custom_properties=output_channel.additional_custom_properties,
reimport=exec_properties[REIMPORT_OPTION_KEY],
output_artifact_class=output_channel.type,
output_key=output_key)
# Update execution with imported artifacts.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=output_artifacts,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
output_dict[output_key] = channel_utils.as_channel(
output_artifacts[output_key])
return data_types.ExecutionDecision(
input_dict={},
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=False)
class Importer(base_node.BaseNode):
"""Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
"""
def __init__(self,
source_uri: str,
artifact_type: Type[types.Artifact],
reimport: Optional[bool] = False,
properties: Optional[Dict[str, Union[str, int]]] = None,
custom_properties: Optional[Dict[str, Union[str, int]]] = None,
output_key: Optional[str] = None):
"""Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
        been imported before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
output_key: The key to use for the imported artifact in the Importer's
output dictionary. Defaults to 'result'.
"""
self._source_uri = source_uri
self._reimport = reimport
self._output_key = output_key or IMPORT_RESULT_KEY
artifact = artifact_type()
artifact.is_external = True
_set_artifact_properties(artifact, properties, custom_properties)
output_channel = types.OutputChannel(
artifact_type,
producer_component=self,
output_key=self._output_key,
additional_properties=properties,
additional_custom_properties=custom_properties)
# TODO(b/161490287): remove static artifacts.
output_channel.set_artifacts([artifact])
self._output_dict = {self._output_key: output_channel}
super().__init__(driver_class=ImporterDriver)
@property
@doc_controls.do_not_generate_docs
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
"""Output Channel dict that contains imported artifacts."""
return self._output_dict
@property
@doc_controls.do_not_generate_docs
def exec_properties(self) -> Dict[str, Any]:
return {
SOURCE_URI_KEY: self._source_uri,
REIMPORT_OPTION_KEY: int(self._reimport),
OUTPUT_KEY_KEY: self._output_key,
}
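# Illustrative usage sketch (not part of this module; the URI and component id
# are placeholders): an Importer can also register an artifact under a custom
# output key and with custom properties, which downstream nodes then consume.
#
#   schema_importer = Importer(
#       source_uri='/path/to/exported/schema',
#       artifact_type=standard_artifacts.Schema,
#       reimport=True,
#       custom_properties={'origin': 'offline_export'},
#       output_key='schema',
#   ).with_id('import_schema')
#   # Downstream nodes read the artifact via schema_importer.outputs['schema'].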
|
ea9b3d3a586212847239b03925925ef3ccc5c3ae
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/tank_setup/sub_views/consumable_slot_model.py
|
4a310f063842eef25ceb39b12e6bc92f62a8a08a
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
consumable_slot_model.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/tank_setup/sub_views/consumable_slot_model.py
from gui.impl.gen.view_models.views.lobby.tank_setup.sub_views.base_slot_model import BaseSlotModel
class ConsumableSlotModel(BaseSlotModel):
__slots__ = ()
def __init__(self, properties=22, commands=0):
super(ConsumableSlotModel, self).__init__(properties=properties, commands=commands)
def getDescription(self):
return self._getString(18)
def setDescription(self, value):
self._setString(18, value)
def getIsBuiltIn(self):
return self._getBool(19)
def setIsBuiltIn(self, value):
self._setBool(19, value)
def getItemName(self):
return self._getString(20)
def setItemName(self, value):
self._setString(20, value)
def getIsBuyMoreDisabled(self):
return self._getBool(21)
def setIsBuyMoreDisabled(self, value):
self._setBool(21, value)
def _initialize(self):
super(ConsumableSlotModel, self)._initialize()
self._addStringProperty('description', '')
self._addBoolProperty('isBuiltIn', False)
self._addStringProperty('itemName', '')
self._addBoolProperty('isBuyMoreDisabled', False)
|
efed85d870504101c18454be6ba8a65ef1d66c1e
|
4262804598ec9669d3537459591bf8e710436b5a
|
/src/imagetagger/base/context_processors.py
|
a043ca05445c4d624968d05733c7f20e4275bc5e
|
[
"MIT"
] |
permissive
|
bit-bots/imagetagger
|
5a639c35bf57e28eec2f340f0786cc3c5c34c982
|
f8ac5f25bf7375baeef8b23b1be7da331246bde2
|
refs/heads/master
| 2022-11-28T22:57:05.551770
| 2021-11-26T10:59:18
| 2021-11-26T10:59:18
| 96,428,939
| 255
| 59
|
MIT
| 2022-11-22T09:21:10
| 2017-07-06T12:41:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 789
|
py
|
context_processors.py
|
from django.conf import settings
from imagetagger.users.models import Team
from imagetagger.tagger_messages.models import TeamMessage
from django.db.models import Q
def base_data(request):
if request.user.is_authenticated:
my_teams = Team.objects.filter(members=request.user)
unread_message_count = TeamMessage.in_range(TeamMessage.get_messages_for_user(request.user).filter(~Q(read_by=request.user))).count()
else:
my_teams = None
unread_message_count = 0
return {
'IMPRINT_URL': settings.IMPRINT_URL,
'USE_IMPRINT': settings.USE_IMPRINT,
'IMPRINT_NAME': settings.IMPRINT_NAME,
'TOOLS_ENABLED': settings.TOOLS_ENABLED,
'my_teams': my_teams,
'unread_message_count': unread_message_count,
}
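# Illustrative note (assumption; the project's actual settings are not shown
# here): a context processor like base_data is enabled by adding its dotted
# path to the 'context_processors' option of the TEMPLATES setting, e.g.:
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'OPTIONS': {
#           'context_processors': [
#               'django.template.context_processors.request',
#               'imagetagger.base.context_processors.base_data',
#           ],
#       },
#   }]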
|
6ad4b279e630a465c5fc9ed22586c932ff176ee6
|
7b4ce7c1411fb2a3b0ab088d89bf93ed97b1905d
|
/dep/scintilla/scintilla-3.21.0/test/XiteQt.py
|
1b3a8dfa06164de028a0e025011496b72efcb8d8
|
[
"MIT",
"LicenseRef-scancode-scintilla"
] |
permissive
|
gitahead/gitahead
|
9fb86aee4df2e60fb1ff5bec615e3f6876b70049
|
81df5b468fc3ebd148320d894e561fb097324b88
|
refs/heads/master
| 2023-08-15T01:20:34.055687
| 2022-05-24T21:03:38
| 2022-05-24T21:03:38
| 159,882,575
| 1,894
| 318
|
MIT
| 2022-04-06T18:37:07
| 2018-11-30T21:52:34
|
C++
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
XiteQt.py
|
# -*- coding: utf-8 -*-
# Requires Python 2.7 or later
import ctypes, os, sys, unittest
from PySide.QtCore import *
from PySide.QtGui import *
import ScintillaCallable
sys.path.append("..")
from bin import ScintillaEditPy
scintillaDirectory = ".."
scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include")
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(460,300)
# Create widget
self.edit = ScintillaEditPy.ScintillaEdit(self)
class XiteWin():
def __init__(self, test=""):
self.face = Face.Face()
self.face.ReadFromFile(os.path.join(scintillaIncludeDirectory, "Scintilla.iface"))
self.test = test
self.form = Form()
scifn = self.form.edit.send(int(self.face.features["GetDirectFunction"]["Value"]), 0, 0)
sciptr = ctypes.c_char_p(self.form.edit.send(
int(self.face.features["GetDirectPointer"]["Value"]), 0,0))
self.ed = ScintillaCallable.ScintillaCallable(self.face, scifn, sciptr)
self.form.show()
def DoStuff(self):
print(self.test)
self.CmdTest()
def DoEvents(self):
QApplication.processEvents()
def CmdTest(self):
runner = unittest.TextTestRunner()
tests = unittest.defaultTestLoader.loadTestsFromName(self.test)
results = runner.run(tests)
print(results)
sys.exit(0)
xiteFrame = None
def main(test):
global xiteFrame
app = QApplication(sys.argv)
xiteFrame = XiteWin(test)
xiteFrame.DoStuff()
sys.exit(app.exec_())
|
a637b9c30a4d7a2e05a979cde00666322a59cf5c
|
e8201f803bb23a1b9a3eab9fc0fc9b1709e65d2e
|
/manim_ml/neural_network/architectures/feed_forward.py
|
5a1f059c5f337a69eb1488a584ce56eb6af31449
|
[
"MIT"
] |
permissive
|
helblazer811/ManimML
|
20bc3548ceab75745a8d8088929fec51057e130f
|
5df233ea90aba16611d29c6a4b7717eb08ae7e09
|
refs/heads/main
| 2023-08-09T07:50:38.605540
| 2023-07-22T02:43:52
| 2023-07-22T02:43:52
| 454,906,591
| 1,339
| 73
|
MIT
| 2023-04-11T02:22:49
| 2022-02-02T19:26:55
|
Python
|
UTF-8
|
Python
| false
| false
| 750
|
py
|
feed_forward.py
|
import manim_ml
from manim_ml.neural_network.neural_network import NeuralNetwork
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
class FeedForwardNeuralNetwork(NeuralNetwork):
"""NeuralNetwork with just feed forward layers"""
def __init__(
self,
layer_node_count,
node_radius=0.08,
node_color=manim_ml.config.color_scheme.primary_color,
**kwargs
):
        # construct layers from the per-layer node counts
layers = []
for num_nodes in layer_node_count:
layer = FeedForwardLayer(
num_nodes, node_color=node_color, node_radius=node_radius
)
layers.append(layer)
# call super class
super().__init__(layers, **kwargs)
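# Illustrative usage sketch (assumption, not part of this module): build a
# small three-layer network and add it to a manim Scene.
#
#   nn = FeedForwardNeuralNetwork([3, 5, 3], node_radius=0.1)
#   # Inside a manim Scene's construct(): self.add(nn)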
|
c07d5591470636cfb7e7b71051aad9192dea555a
|
105f8bb5f417248b2c56fec113746472cea94f5d
|
/labs/07_seq2seq/solutions/make_input_output.py
|
0732a5a467aefc7d53ae1aa9e23480bffada6a3a
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
m2dsupsdlclass/lectures-labs
|
659ce7d8f7a9eb94e776f16a10d0d1df3f037365
|
a41bdfde52081eaa615d86c46fceeae1c4b1d0cd
|
refs/heads/master
| 2023-06-01T20:30:17.669627
| 2022-12-07T11:20:05
| 2022-12-07T11:20:05
| 82,718,394
| 1,482
| 674
|
MIT
| 2022-03-10T21:34:29
| 2017-02-21T19:27:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
make_input_output.py
|
def make_input_output(source_tokens, target_tokens, reverse_source=True):
if reverse_source:
source_tokens = list(reversed(source_tokens))
input_tokens = source_tokens + [GO] + target_tokens
output_tokens = target_tokens + [EOS]
return input_tokens, output_tokens
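# Worked example (illustrative; GO and EOS are the special tokens assumed to
# be defined earlier in the notebook):
#
#   make_input_output(['je', 'suis', 'la'], ['i', 'am', 'here'])
#   # -> (['la', 'suis', 'je', GO, 'i', 'am', 'here'],
#   #     ['i', 'am', 'here', EOS])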
|
3f229521fb2ef4a795d00daca0b2244a775cab70
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/source_estimate.py
|
8a39b542bef61f388b61d63f8aec75e1de34aa89
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 131,745
|
py
|
source_estimate.py
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD-3-Clause
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from scipy import sparse
from scipy.spatial.distance import cdist, pdist
from .baseline import rescale
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .fixes import _safe_svd
from ._freesurfer import _get_mri_info_data, _get_atlas_values, read_freesurfer_lut
from ._fiff.constants import FIFF
from ._fiff.pick import pick_types
from .surface import read_surface, _get_ico_surface, mesh_edges, _project_onto_surface
from .source_space._source_space import (
_ensure_src,
_get_morph_src_reordering,
_ensure_src_subject,
SourceSpaces,
_get_src_nn,
_check_volume_labels,
)
from .transforms import _get_trans, apply_trans
from .utils import (
get_subjects_dir,
_check_subject,
logger,
verbose,
_pl,
_time_mask,
warn,
copy_function_doc_to_method_doc,
fill_doc,
_check_option,
_validate_type,
_check_src_normal,
_check_stc_units,
_check_pandas_installed,
_import_nibabel,
_check_pandas_index_arguments,
_convert_times,
_ensure_int,
_build_data_frame,
_check_time_format,
_path_like,
sizeof_fmt,
object_size,
_check_fname,
_import_h5io_funcs,
TimeMixin,
)
from .viz import (
plot_source_estimates,
plot_vector_source_estimates,
plot_volume_source_estimates,
)
from ._fiff.meas_info import Info
def _read_stc(filename):
"""Aux Function."""
with open(filename, "rb") as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc["tmin"] = (
float(np.frombuffer(buf, dtype=">f4", count=1, offset=offset).item()) / 1000.0
)
offset += num_bytes
# read sampling rate in ms
stc["tstep"] = (
float(np.frombuffer(buf, dtype=">f4", count=1, offset=offset).item()) / 1000.0
)
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset).item())
offset += num_bytes
# read the source vector
stc["vertices"] = np.frombuffer(buf, dtype=">u4", count=vertices_n, offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset).item())
offset += num_bytes
if (
vertices_n
and ( # vertices_n can be 0 (empty stc)
(len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)
)
!= 0
):
raise ValueError("incorrect stc file size")
# read the data matrix
stc["data"] = np.frombuffer(
buf, dtype=">f4", count=vertices_n * data_n, offset=offset
)
stc["data"] = stc["data"].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : path-like
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
with open(filename, "wb") as fid:
# write start time in ms
fid.write(np.array(1000 * tmin, dtype=">f4").tobytes())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype=">f4").tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype=">u4").tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype=">u4").tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype=">u4").tobytes())
# write the data
fid.write(np.array(data.T, dtype=">f4").tobytes())
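# Illustrative round-trip sketch (assumption; the file name is a placeholder):
# writing a small matrix and reading it back recovers tmin, tstep, vertices
# and data. Times are stored in milliseconds on disk.
#
#   _write_stc('tmp-lh.stc', tmin=0.0, tstep=0.001,
#              vertices=np.array([0, 2, 5]), data=np.zeros((3, 10)))
#   stc_dict = _read_stc('tmp-lh.stc')  # keys: tmin, tstep, vertices, data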
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : path-like
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, "rb", buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype=">f4", count=1).item()
w = dict()
w["vertices"] = vertices
w["data"] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: path-like
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert len(vertices) == len(data)
with open(filename, "wb") as fid:
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tobytes())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype=">f4").tobytes())
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : path-like
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
      single file named ``'*-vl.stc'`` or ``'*-vol.stc'``
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere (``'*-lh.stc'``,
``'*-rh.stc'``) or only specify the asterisk part in these patterns. In
      any case, the function expects files for both hemispheres with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point ``.w`` files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
``'*-lh.w'`` and ``'*-rh.w'``.
""" # noqa: E501
fname_arg = fname
# expand `~` without checking whether the file actually exists – we'll
# take care of that later, as it's complicated by the different suffixes
# STC files can have
fname = str(_check_fname(fname=fname, overwrite="read", must_exist=False))
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith(("-vl.stc", "-vol.stc", "-vl.w", "-vol.w")):
ftype = "volume"
elif fname.endswith(".stc"):
ftype = "surface"
if fname.endswith(("-lh.stc", "-rh.stc")):
fname = fname[:-7]
else:
err = (
"Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')" % fname
)
raise OSError(err)
elif fname.endswith(".w"):
ftype = "w"
if fname.endswith(("-lh.w", "-rh.w")):
fname = fname[:-5]
else:
err = (
"Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')" % fname
)
raise OSError(err)
elif fname.endswith(".h5"):
ftype = "h5"
fname = fname[:-3]
else:
raise RuntimeError("Unknown extension for file %s" % fname_arg)
if ftype != "volume":
stc_exist = [op.exists(f) for f in [fname + "-rh.stc", fname + "-lh.stc"]]
w_exist = [op.exists(f) for f in [fname + "-rh.w", fname + "-lh.w"]]
if all(stc_exist) and ftype != "w":
ftype = "surface"
elif all(w_exist):
ftype = "w"
elif op.exists(fname + ".h5"):
ftype = "h5"
elif op.exists(fname + "-stc.h5"):
ftype = "h5"
fname += "-stc"
elif any(stc_exist) or any(w_exist):
raise OSError("Hemisphere missing for %r" % fname_arg)
else:
raise OSError("SourceEstimate File(s) not found for: %r" % fname_arg)
# read the files
if ftype == "volume": # volume source space
if fname.endswith(".stc"):
kwargs = _read_stc(fname)
elif fname.endswith(".w"):
kwargs = _read_w(fname)
kwargs["data"] = kwargs["data"][:, np.newaxis]
kwargs["tmin"] = 0.0
kwargs["tstep"] = 0.0
else:
raise OSError("Volume source estimate must end with .stc or .w")
kwargs["vertices"] = [kwargs["vertices"]]
elif ftype == "surface": # stc file with surface source spaces
lh = _read_stc(fname + "-lh.stc")
rh = _read_stc(fname + "-rh.stc")
assert lh["tmin"] == rh["tmin"]
assert lh["tstep"] == rh["tstep"]
kwargs = lh.copy()
kwargs["data"] = np.r_[lh["data"], rh["data"]]
kwargs["vertices"] = [lh["vertices"], rh["vertices"]]
elif ftype == "w": # w file with surface source spaces
lh = _read_w(fname + "-lh.w")
rh = _read_w(fname + "-rh.w")
kwargs = lh.copy()
kwargs["data"] = np.atleast_2d(np.r_[lh["data"], rh["data"]]).T
kwargs["vertices"] = [lh["vertices"], rh["vertices"]]
# w files only have a single time point
kwargs["tmin"] = 0.0
kwargs["tstep"] = 1.0
ftype = "surface"
elif ftype == "h5":
read_hdf5, _ = _import_h5io_funcs()
kwargs = read_hdf5(fname + ".h5", title="mnepython")
ftype = kwargs.pop("src_type", "surface")
if isinstance(kwargs["vertices"], np.ndarray):
kwargs["vertices"] = [kwargs["vertices"]]
if ftype != "volume":
# Make sure the vertices are ordered
vertices = kwargs["vertices"]
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs["data"][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs["vertices"] = vertices
kwargs["data"] = data
if "subject" not in kwargs:
kwargs["subject"] = subject
if subject is not None and subject != kwargs["subject"]:
raise RuntimeError(
'provided subject name "%s" does not match '
            'subject name from the file "%s"' % (subject, kwargs["subject"])
)
if ftype in ("volume", "discrete"):
klass = VolVectorSourceEstimate
elif ftype == "mixed":
klass = MixedVectorSourceEstimate
else:
assert ftype == "surface"
klass = VectorSourceEstimate
if kwargs["data"].ndim < 3:
klass = klass._scalar_class
return klass(**kwargs)
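# Illustrative usage sketch (file and subject names are placeholders): for a
# surface estimate saved as 'sample_audvis-lh.stc' / 'sample_audvis-rh.stc',
# either one hemisphere file or the common stem can be passed.
#
#   stc = read_source_estimate('sample_audvis-lh.stc', subject='sample')
#   stc = read_source_estimate('sample_audvis', subject='sample')  # same result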
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = "surface"
elif (
isinstance(vertices, np.ndarray)
or isinstance(vertices, list)
and len(vertices) == 1
):
src_type = "volume"
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = "mixed"
else:
src_type = src.kind
assert src_type in ("surface", "volume", "mixed", "discrete")
return src_type
def _make_stc(
data,
vertices,
src_type=None,
tmin=None,
tstep=None,
subject=None,
vector=False,
source_nn=None,
warn_text=None,
):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
def guess_src_type():
return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
src_type = guess_src_type() if src_type is None else src_type
if vector and src_type == "surface" and source_nn is None:
raise RuntimeError("No source vectors supplied.")
# infer Klass from src_type
if src_type == "surface":
Klass = VectorSourceEstimate if vector else SourceEstimate
elif src_type in ("volume", "discrete"):
Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
elif src_type == "mixed":
Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
else:
raise ValueError(
"vertices has to be either a list with one or more " "arrays or an array"
)
# Rotate back for vector source estimates
if vector:
n_vertices = sum(len(v) for v in vertices)
assert data.shape[0] in (n_vertices, n_vertices * 3)
if len(data) == n_vertices:
assert src_type == "surface" # should only be possible for this
assert source_nn.shape == (n_vertices, 3)
data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
else:
data = data.reshape((-1, 3, data.shape[-1]))
assert source_nn.shape in ((n_vertices, 3, 3), (n_vertices * 3, 3))
# This will be an identity transform for volumes, but let's keep
# the code simple and general and just do the matrix mult
data = np.matmul(
np.transpose(source_nn.reshape(n_vertices, 3, 3), axes=[0, 2, 1]), data
)
return Klass(data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject)
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError("Cannot combine %s and %s." % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv) for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError(
"Cannot combine source estimates that do not have "
"the same vertices. Consider using stc.expand()."
)
if a.subject != b.subject:
raise ValueError(
"source estimates do not have the same subject "
"names, %r and %r" % (a.subject, b.subject)
)
class _BaseSourceEstimate(TimeMixin):
_data_ndim = 2
@verbose
def __init__(
self, data, vertices, tmin, tstep, subject=None, verbose=None
): # noqa: D102
assert hasattr(self, "_data_ndim"), self.__class__.__name__
assert hasattr(self, "_src_type"), self.__class__.__name__
assert hasattr(self, "_src_count"), self.__class__.__name__
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError("If data is a tuple it has to be length 2")
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError(
"kernel (%s) and sens_data (%s) have invalid "
"dimensions" % (kernel.shape, sens_data.shape)
)
if sens_data.ndim != 2:
raise ValueError(
"The sensor data must have 2 dimensions, got "
"%s" % (sens_data.ndim,)
)
_validate_type(vertices, list, "vertices")
if self._src_count is not None:
if len(vertices) != self._src_count:
raise ValueError(
"vertices must be a list with %d entries, "
"got %s" % (self._src_count, len(vertices))
)
vertices = [np.array(v, np.int64) for v in vertices] # makes copy
if any(np.any(np.diff(v) <= 0) for v in vertices):
raise ValueError("Vertices must be ordered in increasing order.")
n_src = sum([len(v) for v in vertices])
# safeguard the user against doing something silly
if data is not None:
if data.ndim not in (self._data_ndim, self._data_ndim - 1):
raise ValueError(
"Data (shape %s) must have %s dimensions for "
"%s" % (data.shape, self._data_ndim, self.__class__.__name__)
)
if data.shape[0] != n_src:
raise ValueError(
f"Number of vertices ({n_src}) and stc.data.shape[0] "
f"({data.shape[0]}) must match"
)
if self._data_ndim == 3:
if data.shape[1] != 3:
raise ValueError(
"Data for VectorSourceEstimate must have "
"shape[1] == 3, got shape %s" % (data.shape,)
)
if data.ndim == self._data_ndim - 1: # allow upbroadcasting
data = data[..., np.newaxis]
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, raise_error=False)
def __repr__(self): # noqa: D105
s = "%d vertices" % (sum(len(v) for v in self.vertices),)
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
sz = sum(object_size(x) for x in (self.vertices + [self.data]))
s += f", ~{sizeof_fmt(sz)}"
return "<%s | %s>" % (type(self).__name__, s)
@fill_doc
def get_peak(
self, tmin=None, tmax=None, mode="abs", vert_as_index=False, time_as_index=False
):
"""Get location and latency of peak amplitude.
Parameters
----------
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
stc = self.magnitude() if self._data_ndim == 3 else self
if self._n_vertices == 0:
raise RuntimeError("Cannot find peaks with no vertices")
vert_idx, time_idx, _ = _get_peak(stc.data, self.times, tmin, tmax, mode)
if not vert_as_index:
vert_idx = np.concatenate(self.vertices)[vert_idx]
if not time_as_index:
time_idx = self.times[time_idx]
return vert_idx, time_idx
@verbose
def extract_label_time_course(
self, labels, src, mode="auto", allow_empty=False, verbose=None
):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(labels_eltc)s
%(src_eltc)s
%(mode_eltc)s
%(allow_empty_eltc)s
%(verbose)s
Returns
-------
%(label_tc_el_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self,
labels,
src,
mode=mode,
return_generator=False,
allow_empty=allow_empty,
verbose=verbose,
)
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct source estimate data.
Parameters
----------
%(baseline_stc)s
            Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The baseline-corrected source estimate object.
Notes
-----
Baseline correction can be done multiple times.
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
@verbose
def save(self, fname, ftype="h5", *, overwrite=False, verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : path-like
The file name to write the source estimate to, should end in
``'-stc.h5'``.
ftype : str
File format to use. Currently, the only allowed values is ``"h5"``.
%(overwrite)s
.. versionadded:: 1.0
%(verbose)s
"""
fname = _check_fname(fname=fname, overwrite=True) # check below
if ftype != "h5":
raise ValueError(
"%s objects can only be written as HDF5 files."
% (self.__class__.__name__,)
)
_, write_hdf5 = _import_h5io_funcs()
if fname.suffix != ".h5":
fname = fname.with_name(f"{fname.name}-stc.h5")
fname = _check_fname(fname=fname, overwrite=overwrite)
write_hdf5(
fname,
dict(
vertices=self.vertices,
data=self.data,
tmin=self.tmin,
tstep=self.tstep,
subject=self.subject,
src_type=self._src_type,
),
title="mnepython",
overwrite=True,
)
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(
self,
subject=None,
surface="inflated",
hemi="lh",
colormap="auto",
time_label="auto",
smoothing_steps=10,
transparent=True,
alpha=1.0,
time_viewer="auto",
subjects_dir=None,
figure=None,
views="auto",
colorbar=True,
clim="auto",
cortex="classic",
size=800,
background="black",
foreground=None,
initial_time=None,
time_unit="s",
backend="auto",
spacing="oct6",
title=None,
show_traces="auto",
src=None,
volume_options=1.0,
view_layout="vertical",
add_data_kwargs=None,
brain_kwargs=None,
verbose=None,
):
brain = plot_source_estimates(
self,
subject,
surface=surface,
hemi=hemi,
colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent,
alpha=alpha,
time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure,
views=views,
colorbar=colorbar,
clim=clim,
cortex=cortex,
size=size,
background=background,
foreground=foreground,
initial_time=initial_time,
time_unit=time_unit,
backend=backend,
spacing=spacing,
title=title,
show_traces=show_traces,
src=src,
volume_options=volume_options,
view_layout=view_layout,
add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs,
verbose=verbose,
)
return brain
@property
def sfreq(self):
"""Sample rate of the data."""
return 1.0 / self.tstep
@property
def _n_vertices(self):
return sum(len(v) for v in self.vertices)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
%(include_tmax)s
Returns
-------
stc : instance of SourceEstimate
The cropped source estimate.
"""
mask = _time_mask(
self.times, tmin, tmax, sfreq=self.sfreq, include_tmax=include_tmax
)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad="auto", window="boxcar", n_jobs=None, verbose=None):
"""Resample data.
If appropriate, an anti-aliasing filter is applied before resampling.
See :ref:`resampling-and-decimating` for more information.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : str | tuple
Window to use in resampling. See :func:`scipy.signal.resample`.
%(n_jobs)s
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The resampled source estimate.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
from .filter import _check_resamp_noop
o_sfreq = 1.0 / self.tstep
if _check_resamp_noop(sfreq, o_sfreq):
return self
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
data = self.data
if data.dtype == np.float32:
data = data.astype(np.float64)
self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError("Data array should have %d dimensions." % self._data.ndim)
n_verts = sum(len(v) for v in self.vertices)
if value.shape[0] != n_verts:
raise ValueError(
"The first dimension of the data array must "
"match the number of vertices (%d != %d)" % (value.shape[0], n_verts)
)
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError(".tstep must be greater than 0.")
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError(
"You cannot write to the .times attribute directly. "
"This property automatically updates whenever "
".tmin, .tstep or .data changes."
)
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
out = self.sum()
out /= len(self.times)
return out
def sum(self):
"""Make a summary stc file with sum over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.0
tstep = tmax - self.tmin
sum_stc = self.__class__(
self.data.sum(axis=-1, keepdims=True),
vertices=self.vertices,
tmin=tmin,
tstep=tstep,
subject=self.subject,
)
return sum_stc
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance.
Returns
-------
stc : instance of SourceEstimate
A copy of the source estimate.
"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
Time bins of ``width`` seconds. This method is intended for
visualization only. No filter is applied to the data before binning,
making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.0
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
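    # Illustrative usage sketch (assumption): average the data into
    # non-overlapping 50 ms bins for plotting, e.g.
    #   stc_binned = stc.bin(width=0.05)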
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
            Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)" for
        the "data" parameter, as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn(
"Performance can be improved by not accessing the data "
"attribute before calling this method."
)
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(
data_shape[0], np.prod(data_shape[1:])
)
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. See Notes for details.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
Applying transforms can be significantly faster if the
        SourceEstimate object was created using "(kernel, sens_data)" for
        the "data" parameter, as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
# min and max data indices to include
times = 1000.0 * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(
func, idx=idx, tmin_idx=tmin_idx, tmax_idx=tmax_idx
)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [
SourceEstimate(
data_t[:, :, a], verts, tmin, self.tstep, self.subject
)
for a in range(data_t.shape[-1])
]
else:
raise ValueError(
"copy must be True if transformed data has "
"more than 2 dimensions"
)
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
@verbose
def to_data_frame(
self,
index=None,
scalings=None,
long_format=False,
time_format=None,
*,
verbose=None,
):
"""Export data in tabular structure as a pandas DataFrame.
Vertices are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(index_df_evk)s
Defaults to ``None``.
%(scalings_df)s
%(long_format_df_stc)s
%(time_format_df)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ["time", "subject"]
valid_time_formats = ["ms", "timedelta"]
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
data = self.data.T
times = self.times
# prepare extra columns / multiindex
mindex = list()
default_index = ["time"]
if self.subject is not None:
default_index = ["subject", "time"]
mindex.append(("subject", np.repeat(self.subject, data.shape[0])))
times = _convert_times(self, times, time_format)
mindex.append(("time", times))
# triage surface vs volume source estimates
col_names = list()
kinds = ["VOL"] * len(self.vertices)
if isinstance(self, (_BaseSurfaceSourceEstimate, _BaseMixedSourceEstimate)):
kinds[:2] = ["LH", "RH"]
for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
col_names.extend(["{}_{}".format(kind, vert) for vert in vertno])
# build DataFrame
df = _build_data_frame(
self,
data,
None,
long_format,
mindex,
index,
default_index=default_index,
col_names=col_names,
col_kind="source",
)
return df
def _center_of_mass(
vertices, values, hemi, surf, subject, subjects_dir, restrict_vertices
):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError(
"All values must be non-negative and at least one "
"must be non-zero, cannot compute COM"
)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(subjects_dir / subject / "surf" / f"{hemi}.{surf}")
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == "surface" and hemi == "rh" else 0
restrict_vertices = restrict_vertices[idx]["vertno"]
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(
np.sqrt(np.mean((surf[0][restrict_vertices, :] - c_o_m) ** 2, axis=1))
)
vertex = restrict_vertices[vertex]
return vertex
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
_src_type = "surface"
_src_count = 2
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[: len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno) :]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == "lh":
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == "rh":
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
"""
# make sure label and stc are compatible
from .label import Label, BiHemiLabel
_validate_type(label, (Label, BiHemiLabel), "label")
if (
label.subject is not None
and self.subject is not None
and label.subject != self.subject
):
raise RuntimeError(
"label and stc must have same subject names, "
'currently "%s" and "%s"' % (label.subject, self.subject)
)
if label.hemi == "both":
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == "lh":
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
else:
assert label.hemi == "rh"
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
if sum([len(v) for v in vertices]) == 0:
raise ValueError("No vertices match the label in the stc file")
label_stc = self.__class__(
values,
vertices=vertices,
tmin=self.tmin,
tstep=self.tstep,
subject=self.subject,
)
return label_stc
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError("vertices must be a list")
if not len(self.vertices) == len(vertices):
raise ValueError("vertices must have the same length as " "stc.vertices")
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
@verbose
def to_original_src(
self, src_orig, subject_orig=None, subjects_dir=None, verbose=None
):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
%(subjects_dir)s
%(verbose)s
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError("stc.subject must be set")
src_orig = _ensure_src(src_orig, kind="surface")
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir
)
return self.__class__(
self._data[data_idx], vertices, self.tmin, self.tstep, subject_orig
)
@fill_doc
def get_peak(
self,
hemi=None,
tmin=None,
tmax=None,
mode="abs",
vert_as_index=False,
time_as_index=False,
):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
_check_option("hemi", hemi, ("lh", "rh", None))
vertex_offset = 0
if hemi is not None:
if hemi == "lh":
data = self.lh_data
vertices = [self.lh_vertno, []]
else:
vertex_offset = len(self.vertices[0])
data = self.rh_data
vertices = [[], self.rh_vertno]
meth = self.__class__(data, vertices, self.tmin, self.tstep).get_peak
else:
meth = super().get_peak
out = meth(
tmin=tmin,
tmax=tmax,
mode=mode,
vert_as_index=vert_as_index,
time_as_index=time_as_index,
)
if vertex_offset and vert_as_index:
out = (out[0] + vertex_offset, out[1])
return out
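# Illustrative sketch (``stc`` assumed): locate the strongest response in the
# left hemisphere between 100 and 200 ms, returned as a vertex number and a
# latency in seconds.
#
#     peak_vertex, peak_latency = stc.get_peak(hemi="lh", tmin=0.1, tmax=0.2)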
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. When it is a single array, the
left hemisphere is stored in data[:len(vertices[0])] and the right
hemisphere is stored in data[-len(vertices[1]):].
When data is a tuple, it contains two arrays:
- "kernel" shape (n_vertices, n_sensors) and
- "sens_data" shape (n_sensors, n_times).
In this case, the source space data corresponds to
``np.dot(kernel, sens_data)``.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector surface source estimates.
VolSourceEstimate : A container for volume source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype="stc", *, overwrite=False, verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : path-like
The stem of the file name. The file names used for surface source
spaces are obtained by adding ``"-lh.stc"`` and ``"-rh.stc"`` (or
``"-lh.w"`` and ``"-rh.w"``) to the stem provided, for the left and
the right hemisphere, respectively.
ftype : str
File format to use. Allowed values are ``"stc"`` (default),
``"w"``, and ``"h5"``. The ``"w"`` format only supports a single
time point.
%(overwrite)s
.. versionadded:: 1.0
%(verbose)s
"""
fname = str(_check_fname(fname=fname, overwrite=True)) # checked below
_check_option("ftype", ftype, ["stc", "w", "h5"])
lh_data = self.data[: len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno) :]
if ftype == "stc":
if np.iscomplexobj(self.data):
raise ValueError(
"Cannot save complex-valued STC data in "
"FIFF format; please set ftype='h5' to save "
"in HDF5 format instead, or cast the data to "
"real numbers before saving."
)
logger.info("Writing STC to disk...")
fname_l = str(_check_fname(fname + "-lh.stc", overwrite=overwrite))
fname_r = str(_check_fname(fname + "-rh.stc", overwrite=overwrite))
_write_stc(
fname_l,
tmin=self.tmin,
tstep=self.tstep,
vertices=self.lh_vertno,
data=lh_data,
)
_write_stc(
fname_r,
tmin=self.tmin,
tstep=self.tstep,
vertices=self.rh_vertno,
data=rh_data,
)
elif ftype == "w":
if self.shape[1] != 1:
raise ValueError("w files can only contain a single time " "point")
logger.info("Writing STC to disk (w format)...")
fname_l = str(_check_fname(fname + "-lh.w", overwrite=overwrite))
fname_r = str(_check_fname(fname + "-rh.w", overwrite=overwrite))
_write_w(fname_l, vertices=self.lh_vertno, data=lh_data[:, 0])
_write_w(fname_r, vertices=self.rh_vertno, data=rh_data[:, 0])
elif ftype == "h5":
super().save(fname, overwrite=overwrite)
logger.info("[done]")
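# Illustrative sketch (``stc`` and the file stem are assumed): write the usual
# "-lh.stc"/"-rh.stc" pair, or a single HDF5 file (required for complex data).
#
#     stc.save("my_stc", ftype="stc", overwrite=True)
#     stc.save("my_stc", ftype="h5", overwrite=True)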
@verbose
def estimate_snr(self, info, fwd, cov, verbose=None):
r"""Compute time-varying SNR in the source space.
This function should only be used with source estimates with units
nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
See also :footcite:`GoldenholzEtAl2009`.
.. warning:: This function currently only works properly for fixed
orientation.
Parameters
----------
%(info_not_none)s
fwd : instance of Forward
The forward solution used to create the source estimate.
cov : instance of Covariance
The noise covariance used to estimate the resting cortical
activations. Should be an evoked covariance, not empty room.
%(verbose)s
Returns
-------
snr_stc : instance of SourceEstimate
The source estimate with the SNR computed.
Notes
-----
We define the SNR in decibels for each source location at each
time point as:
.. math::
{\rm SNR} = 10\log_{10}\left[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}\right]
where :math:`b_k` is the signal on sensor :math:`k` provided by the
forward model for a source with unit amplitude, :math:`a` is the
source amplitude, :math:`N` is the number of sensors, and
:math:`s_k^2` is the noise variance on sensor :math:`k`.
References
----------
.. footbibliography::
"""
from .forward import convert_forward_solution, Forward
from .minimum_norm.inverse import _prepare_forward
_validate_type(fwd, Forward, "fwd")
_validate_type(info, Info, "info")
_validate_type(cov, Covariance, "cov")
_check_stc_units(self)
if (self.data >= 0).all():
warn(
"This STC appears to be from free orientation, currently SNR"
" function is valid only for fixed orientation"
)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
# G is gain matrix [ch x src], cov is noise covariance [ch x ch]
G, _, _, _, _, _, _, cov, _ = _prepare_forward(
fwd,
info,
cov,
fixed=True,
loose=0,
rank=None,
pca=False,
use_cps=True,
exp=None,
limit_depth_chs=False,
combine_xyz="fro",
allow_fixed_depth=False,
limit=None,
)
G = G["sol"]["data"]
n_channels = cov["dim"] # number of sensors/channels
b_k2 = (G * G).T
s_k2 = np.diag(cov["data"])
scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
snr_stc = self.copy()
snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
return snr_stc
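# Illustrative sketch (``stc``, ``evoked``, ``fwd`` and ``cov`` assumed, with
# ``stc`` a fixed-orientation MNE solution in nAm): per-source SNR in dB.
#
#     snr_stc = stc.estimate_snr(evoked.info, fwd, cov)
#     snr_db = snr_stc.data.mean(axis=0)  # average over sources at each time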
@fill_doc
def center_of_mass(
self,
subject=None,
hemi=None,
restrict_vertices=False,
subjects_dir=None,
surf="sphere",
):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in :footcite:`LarsonLee2013`.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : str | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
For the most accurate estimates, do not restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
References
----------
.. footbibliography::
"""
if not isinstance(surf, str):
raise TypeError("surf must be a string, got %s" % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError("Cannot compute COM with negative values")
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [
np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0]),
]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi]) for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError("Could not infer hemisphere")
hemi = hemi[0]
_check_option("hemi", hemi, [0, 1])
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices,
values,
hemi=["lh", "rh"][hemi],
surf=surf,
subject=subject,
subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices,
)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
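# Illustrative sketch (``stc`` assumed non-negative with activity confined to
# one hemisphere, e.g. a thresholded cluster): spatial and temporal center of
# mass for a hypothetical subject "sample".
#
#     vertex, hemi, t = stc.center_of_mass("sample", restrict_vertices=True)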
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
_data_ndim = 3
@verbose
def __init__(
self, data, vertices=None, tmin=None, tstep=None, subject=None, verbose=None
): # noqa: D102
assert hasattr(self, "_scalar_class")
super().__init__(data, vertices, tmin, tstep, subject, verbose)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return self._scalar_class(
data_mag, self.vertices, self.tmin, self.tstep, self.subject
)
def _get_src_normals(self, src, use_cps):
normals = np.vstack(
[_get_src_nn(s, use_cps, v) for s, v in zip(src, self.vertices)]
)
return normals
@fill_doc
def project(self, directions, src=None, use_cps=True):
"""Project the data for each vertex in a given direction.
Parameters
----------
directions : ndarray, shape (n_vertices, 3) | str
Can be:
- ``'normal'``
Project onto the source space normals.
- ``'pca'``
SVD will be used to project onto the direction of maximal
power for each source.
- :class:`~numpy.ndarray`, shape (n_vertices, 3)
Projection directions for each source.
src : instance of SourceSpaces | None
The source spaces corresponding to the source estimate.
Not used when ``directions`` is an array, optional when
``directions='pca'``.
%(use_cps)s
Should be the same value that was used when the forward model
was computed (typically True).
Returns
-------
stc : instance of SourceEstimate
The projected source estimate.
directions : ndarray, shape (n_vertices, 3)
The directions that were computed (or just used).
Notes
-----
When using SVD, there is a sign ambiguity for the direction of maximal
power. When ``src is None``, the direction is chosen that makes the
resulting time waveform sum positive (i.e., have positive amplitudes).
When ``src`` is provided, the directions are flipped in the direction
of the source normals, i.e., outward from cortex for surface source
spaces and in the +Z / superior direction for volume source spaces.
.. versionadded:: 0.21
"""
_validate_type(directions, (str, np.ndarray), "directions")
_validate_type(src, (None, SourceSpaces), "src")
if isinstance(directions, str):
_check_option("directions", directions, ("normal", "pca"), extra="when str")
if directions == "normal":
if src is None:
raise ValueError('If directions="normal", src cannot be None')
_check_src_normal("normal", src)
directions = self._get_src_normals(src, use_cps)
else:
assert directions == "pca"
x = self.data
if not np.isrealobj(self.data):
_check_option(
"stc.data.dtype", self.data.dtype, (np.complex64, np.complex128)
)
dtype = np.float32 if x.dtype == np.complex64 else np.float64
x = x.view(dtype)
assert x.shape[-1] == 2 * self.data.shape[-1]
u, _, v = np.linalg.svd(x, full_matrices=False)
directions = u[:, :, 0]
# The sign is arbitrary, so let's flip it in the direction that
# makes the resulting time series the most positive:
if src is None:
signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
else:
normals = self._get_src_normals(src, use_cps)
signs = np.sum(directions * normals, axis=1, keepdims=True)
assert signs.shape == (self.data.shape[0], 1)
signs = np.sign(signs)
signs[signs == 0] = 1.0
directions *= signs
_check_option("directions.shape", directions.shape, [(self.data.shape[0], 3)])
data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
stc = self._scalar_class(
data_norm, self.vertices, self.tmin, self.tstep, self.subject
)
return stc, directions
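# Illustrative sketch (``stc_vec`` is an assumed VectorSourceEstimate and
# ``src`` the matching surface source space): collapse the XYZ components
# either onto the cortical normals or onto each source's direction of
# maximal power.
#
#     stc_normal, _ = stc_vec.project("normal", src=src)
#     stc_pca, directions = stc_vec.project("pca", src=src)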
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(
self,
subject=None,
hemi="lh",
colormap="hot",
time_label="auto",
smoothing_steps=10,
transparent=True,
brain_alpha=0.4,
overlay_alpha=None,
vector_alpha=1.0,
scale_factor=None,
time_viewer="auto",
subjects_dir=None,
figure=None,
views="lateral",
colorbar=True,
clim="auto",
cortex="classic",
size=800,
background="black",
foreground=None,
initial_time=None,
time_unit="s",
show_traces="auto",
src=None,
volume_options=1.0,
view_layout="vertical",
add_data_kwargs=None,
brain_kwargs=None,
verbose=None,
): # noqa: D102
return plot_vector_source_estimates(
self,
subject=subject,
hemi=hemi,
colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent,
brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha,
vector_alpha=vector_alpha,
scale_factor=scale_factor,
time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure,
views=views,
colorbar=colorbar,
clim=clim,
cortex=cortex,
size=size,
background=background,
foreground=foreground,
initial_time=initial_time,
time_unit=time_unit,
show_traces=show_traces,
src=src,
volume_options=volume_options,
view_layout=view_layout,
add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs,
verbose=verbose,
)
class _BaseVolSourceEstimate(_BaseSourceEstimate):
_src_type = "volume"
_src_count = None
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot_3d(
self,
subject=None,
surface="white",
hemi="both",
colormap="auto",
time_label="auto",
smoothing_steps=10,
transparent=True,
alpha=0.1,
time_viewer="auto",
subjects_dir=None,
figure=None,
views="axial",
colorbar=True,
clim="auto",
cortex="classic",
size=800,
background="black",
foreground=None,
initial_time=None,
time_unit="s",
backend="auto",
spacing="oct6",
title=None,
show_traces="auto",
src=None,
volume_options=1.0,
view_layout="vertical",
add_data_kwargs=None,
brain_kwargs=None,
verbose=None,
):
return super().plot(
subject=subject,
surface=surface,
hemi=hemi,
colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent,
alpha=alpha,
time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure,
views=views,
colorbar=colorbar,
clim=clim,
cortex=cortex,
size=size,
background=background,
foreground=foreground,
initial_time=initial_time,
time_unit=time_unit,
backend=backend,
spacing=spacing,
title=title,
show_traces=show_traces,
src=src,
volume_options=volume_options,
view_layout=view_layout,
add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs,
verbose=verbose,
)
@copy_function_doc_to_method_doc(plot_volume_source_estimates)
def plot(
self,
src,
subject=None,
subjects_dir=None,
mode="stat_map",
bg_img="T1.mgz",
colorbar=True,
colormap="auto",
clim="auto",
transparent="auto",
show=True,
initial_time=None,
initial_pos=None,
verbose=None,
):
data = self.magnitude() if self._data_ndim == 3 else self
return plot_volume_source_estimates(
data,
src=src,
subject=subject,
subjects_dir=subjects_dir,
mode=mode,
bg_img=bg_img,
colorbar=colorbar,
colormap=colormap,
clim=clim,
transparent=transparent,
show=show,
initial_time=initial_time,
initial_pos=initial_pos,
verbose=verbose,
)
# Override here to provide the volume-specific options
@verbose
def extract_label_time_course(
self,
labels,
src,
mode="auto",
allow_empty=False,
*,
mri_resolution=True,
verbose=None,
):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(labels_eltc)s
%(src_eltc)s
%(mode_eltc)s
%(allow_empty_eltc)s
%(mri_resolution_eltc)s
%(verbose)s
Returns
-------
%(label_tc_el_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self,
labels,
src,
mode=mode,
return_generator=False,
allow_empty=allow_empty,
mri_resolution=mri_resolution,
verbose=verbose,
)
@verbose
def in_label(self, label, mri, src, *, verbose=None):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : str | int
The label to use. Can be the name of a label if using a standard
FreeSurfer atlas, or an integer value to extract from the ``mri``.
mri : str
Path to the atlas to use.
src : instance of SourceSpaces
The volumetric source space. It must be a single, whole-brain
volume.
%(verbose)s
Returns
-------
stc : VolSourceEstimate | VolVectorSourceEstimate
The source estimate restricted to the given label.
Notes
-----
.. versionadded:: 0.21.0
"""
if len(self.vertices) != 1:
raise RuntimeError(
"This method can only be used with whole-brain " "volume source spaces"
)
_validate_type(label, (str, "int-like"), "label")
if isinstance(label, str):
volume_label = [label]
else:
volume_label = {"Volume ID %s" % (label): _ensure_int(label)}
label = _volume_labels(src, (mri, volume_label), mri_resolution=False)
assert len(label) == 1
label = label[0]
vertices = label.vertices
keep = np.in1d(self.vertices[0], label.vertices)
values, vertices = self.data[keep], [self.vertices[0][keep]]
label_stc = self.__class__(
values,
vertices=vertices,
tmin=self.tmin,
tstep=self.tstep,
subject=self.subject,
)
return label_stc
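# Illustrative sketch (``stc_vol`` is an assumed VolSourceEstimate on a
# whole-brain volume source space ``vol_src``; ``aseg_path`` points to a
# FreeSurfer aseg.mgz): keep only the left hippocampus.
#
#     stc_hip = stc_vol.in_label("Left-Hippocampus", aseg_path, vol_src)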
@verbose
def save_as_volume(
self,
fname,
src,
dest="mri",
mri_resolution=False,
format="nifti1",
*,
overwrite=False,
verbose=None,
):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : path-like
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : ``'mri'`` | ``'surf'``
If ``'mri'`` the volume is defined in the coordinate system of
the original T1 image. If ``'surf'`` the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge. The default is ``mri_resolution=False``.
format : str
Either ``'nifti1'`` (default) or ``'nifti2'``.
.. versionadded:: 0.17
%(overwrite)s
.. versionadded:: 1.0
%(verbose)s
.. versionadded:: 1.0
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
nib = _import_nibabel()
fname = _check_fname(fname=fname, overwrite=overwrite)
img = self.as_volume(
src, dest=dest, mri_resolution=mri_resolution, format=format
)
nib.save(img, fname)
def as_volume(self, src, dest="mri", mri_resolution=False, format="nifti1"):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : instance of SourceSpaces
The source spaces (should all be of type volume, or part of a
mixed source space).
dest : ``'mri'`` | ``'surf'``
If ``'mri'`` the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge. The default is ``mri_resolution=False``.
format : str
Either 'nifti1' (default) or 'nifti2'.
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
from .morph import _interpolate_data
data = self.magnitude() if self._data_ndim == 3 else self
return _interpolate_data(
data, src, mri_resolution=mri_resolution, mri_space=True, output=format
)
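# Illustrative sketch (``stc_vol`` and ``vol_src`` assumed): export to NIfTI,
# either as an in-memory image or written straight to disk.
#
#     img = stc_vol.as_volume(vol_src, mri_resolution=True)
#     stc_vol.save_as_volume("stc_vol.nii.gz", vol_src, overwrite=True)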
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
%(vertices_volume)s
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
%(vertices_volume)s
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector surface source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def save(self, fname, ftype="stc", *, overwrite=False, verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : path-like
The stem of the file name. The stem is extended with ``"-vl.stc"``
or ``"-vl.w"``.
ftype : str
File format to use. Allowed values are ``"stc"`` (default),
``"w"``, and ``"h5"``. The ``"w"`` format only supports a single
time point.
%(overwrite)s
.. versionadded:: 1.0
%(verbose)s
"""
# check overwrite individually below
fname = str(_check_fname(fname=fname, overwrite=True)) # checked below
_check_option("ftype", ftype, ["stc", "w", "h5"])
if ftype != "h5" and len(self.vertices) != 1:
raise ValueError(
"Can only write to .stc or .w if a single volume "
"source space was used, use .h5 instead"
)
if ftype != "h5" and self.data.dtype == "complex":
raise ValueError(
"Can only write non-complex data to .stc or .w" ", use .h5 instead"
)
if ftype == "stc":
logger.info("Writing STC to disk...")
if not fname.endswith(("-vl.stc", "-vol.stc")):
fname += "-vl.stc"
fname = str(_check_fname(fname, overwrite=overwrite))
_write_stc(
fname,
tmin=self.tmin,
tstep=self.tstep,
vertices=self.vertices[0],
data=self.data,
)
elif ftype == "w":
logger.info("Writing STC to disk (w format)...")
if not fname.endswith(("-vl.w", "-vol.w")):
fname += "-vl.w"
fname = str(_check_fname(fname, overwrite=overwrite))
_write_w(fname, vertices=self.vertices[0], data=self.data[:, 0])
elif ftype == "h5":
super().save(fname, "h5", overwrite=overwrite)
logger.info("[done]")
@fill_doc
class VolVectorSourceEstimate(_BaseVolSourceEstimate, _BaseVectorSourceEstimate):
"""Container for volume vector source estimates.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
%(vertices_volume)s
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
%(vertices_volume)s
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
_scalar_class = VolSourceEstimate
# defaults differ: hemi='both', views='axial'
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot_3d(
self,
subject=None,
hemi="both",
colormap="hot",
time_label="auto",
smoothing_steps=10,
transparent=True,
brain_alpha=0.4,
overlay_alpha=None,
vector_alpha=1.0,
scale_factor=None,
time_viewer="auto",
subjects_dir=None,
figure=None,
views="axial",
colorbar=True,
clim="auto",
cortex="classic",
size=800,
background="black",
foreground=None,
initial_time=None,
time_unit="s",
show_traces="auto",
src=None,
volume_options=1.0,
view_layout="vertical",
add_data_kwargs=None,
brain_kwargs=None,
verbose=None,
): # noqa: D102
return _BaseVectorSourceEstimate.plot(
self,
subject=subject,
hemi=hemi,
colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent,
brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha,
vector_alpha=vector_alpha,
scale_factor=scale_factor,
time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure,
views=views,
colorbar=colorbar,
clim=clim,
cortex=cortex,
size=size,
background=background,
foreground=foreground,
initial_time=initial_time,
time_unit=time_unit,
show_traces=show_traces,
src=src,
volume_options=volume_options,
view_layout=view_layout,
add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs,
verbose=verbose,
)
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate, _BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
For each vertex, the magnitude of the current is defined in the X, Y and Z
directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.15
"""
_scalar_class = SourceEstimate
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
_src_type = "mixed"
_src_count = None
@verbose
def __init__(
self, data, vertices=None, tmin=None, tstep=None, subject=None, verbose=None
): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError(
"Vertices must be a list of numpy arrays with "
"one array per source space."
)
super().__init__(
data,
vertices=vertices,
tmin=tmin,
tstep=tstep,
subject=subject,
verbose=verbose,
)
@property
def _n_surf_vert(self):
return sum(len(v) for v in self.vertices[:2])
def surface(self):
"""Return the cortical surface source estimate.
Returns
-------
stc : instance of SourceEstimate or VectorSourceEstimate
The surface source estimate.
"""
if self._data_ndim == 3:
klass = VectorSourceEstimate
else:
klass = SourceEstimate
return klass(
self.data[: self._n_surf_vert],
self.vertices[:2],
self.tmin,
self.tstep,
self.subject,
)
def volume(self):
"""Return the volume source estimate.
Returns
-------
stc : instance of VolSourceEstimate or VolVectorSourceEstimate
The volume source estimate.
"""
if self._data_ndim == 3:
klass = VolVectorSourceEstimate
else:
klass = VolSourceEstimate
return klass(
self.data[self._n_surf_vert :],
self.vertices[2:],
self.tmin,
self.tstep,
self.subject,
)
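# Illustrative sketch (``stc_mixed`` assumed to be a MixedSourceEstimate built
# on a mixed source space): split it into its cortical and volume parts.
#
#     stc_cortex = stc_mixed.surface()
#     stc_deep = stc_mixed.volume()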
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector surface source estimates.
VolSourceEstimate : A container for volume source estimates.
VolVectorSourceEstimate : A container for Volume vector source estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate, _BaseMixedSourceEstimate):
"""Container for mixed vector surface + volume source estimates.
Parameters
----------
data : array, shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (n_src,)
Vertex numbers corresponding to the data.
%(tmin)s
%(tstep)s
%(subject_optional)s
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array, shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.21.0
"""
_scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]["shape"][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]["vertno"]] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
adjacency = _get_adjacency_from_edges(edges, n_times)
return adjacency
def _spatio_temporal_src_adjacency_surf(src, n_times):
if src[0]["use_tris"] is None:
# XXX It would be nice to support non oct source spaces too...
raise RuntimeError(
"The source space does not appear to be an ico "
"surface. adjacency cannot be extracted from"
" non-ico source spaces."
)
used_verts = [np.unique(s["use_tris"]) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate(
[
np.searchsorted(u_v, s["use_tris"]) + off
for u_v, s, off in zip(used_verts, src, offs)
]
)
adjacency = spatio_temporal_tris_adjacency(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s["vertno"]) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
raise ValueError("Used vertices do not match adjacency shape")
if [np.sum(m) for m in masks] != [len(s["vertno"]) for s in src]:
raise ValueError("Vertex mask does not match number of vertices")
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn(
"%0.1f%% of original source space vertices have been"
" omitted, tri-based adjacency will have holes.\n"
"Consider using distance-based adjacency or "
"morphing data to all source space vertices." % missing
)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
adjacency = adjacency.tocsr()
adjacency = adjacency[masks]
adjacency = adjacency[:, masks]
# return to original format
adjacency = adjacency.tocoo()
return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
"""Compute adjacency for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N + 1 to 2N are the vertices
at time 2, etc.
"""
# XXX we should compute adjacency for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]["type"] == "vol":
if dist is not None:
raise ValueError(
"dist must be None for a volume " "source space. Got %s." % dist
)
adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)
else:
adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)
return adjacency
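# Illustrative sketch (``src`` is an assumed surface SourceSpaces, ideally
# taken from the inverse operator, and ``stc`` an assumed estimate): adjacency
# for spatio-temporal cluster statistics.
#
#     adjacency = spatio_temporal_src_adjacency(src, n_times=stc.data.shape[1])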
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
%(verbose)s
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_adjacency.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N + 1 to 2N are the vertices
at time 2, etc.
"""
if remap_vertices:
logger.info("Reassigning vertex indices.")
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris)
edges = (edges + sparse.eye(edges.shape[0], format="csr")).tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
"""Compute adjacency from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
n_times : int
Number of time points.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N + 1 to 2N are the vertices
at time 2, etc.
"""
if src[0]["dist"] is None:
raise RuntimeError(
"src must have distances included, consider using "
"setup_source_space with add_dist=True"
)
blocks = [s["dist"][s["vertno"], :][:, s["vertno"]] for s in src]
# Ensure we keep explicit zeros; deal with changes in SciPy
for block in blocks:
if isinstance(block, np.ndarray):
block[block == 0] = -np.inf
else:
block.data[block.data == 0] = -np.inf  # assignment (not comparison), mirroring the dense branch above
edges = sparse.block_diag(blocks)
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
"""Compute adjacency for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_src_adjacency(src, 1, dist)
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
"""Compute adjacency from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_adjacency(src, 1, dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
Typically this should be combined (additively) with another
existing intra-hemispheric adjacency matrix, e.g. computed
using geodesic distances.
"""
src = _ensure_src(src, kind="surface")
adj = cdist(src[0]["rr"][src[0]["vertno"]], src[1]["rr"][src[1]["vertno"]])
adj = sparse.csr_matrix(adj <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]
adj = sparse.vstack(
[sparse.hstack([empties[0], adj]), sparse.hstack([adj.T, empties[1]])]
)
return adj
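# Illustrative sketch (``src`` assumed): augment the intra-hemispheric
# adjacency with links between hemispheres for vertices within 5 mm of the
# other hemisphere.
#
#     adjacency = spatial_src_adjacency(src) + spatial_inter_hemi_adjacency(src, 0.005)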
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create adjacency matrix."""
n_vertices = edges.shape[0]
logger.info("-- number of adjacent vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (
n_vertices * np.arange(n_times - 1)[:, None]
+ np.arange(n_vertices)[None, :]
).ravel()
d = (
n_vertices * np.arange(1, n_times)[:, None] + np.arange(n_vertices)[None, :]
).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(
edges.data.size * n_times + 2 * n_vertices * (n_times - 1), dtype=np.int64
)
adjacency = sparse.coo_matrix((data, (row, col)), shape=(n_times * n_vertices,) * 2)
return adjacency
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico["tris"]
else:
return ico
def _pca_flip(flip, data):
U, s, V = _safe_svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = np.linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
_label_funcs = {
"mean": lambda flip, data: np.mean(data, axis=0),
"mean_flip": lambda flip, data: np.mean(flip * data, axis=0),
"max": lambda flip, data: np.max(np.abs(data), axis=0),
"pca_flip": _pca_flip,
}
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s["vertno"] for s in src]
for s, v in zip(src, vertices):
s["vertno"] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s["vertno"] = v
def _check_stc_src(stc, src):
if stc is not None and src is not None:
_check_subject(
src._subject,
stc.subject,
raise_error=False,
first_kind="source space subject",
second_kind="stc.subject",
)
for s, v, hemi in zip(src, stc.vertices, ("left", "right")):
n_missing = (~np.in1d(v, s["vertno"])).sum()
if n_missing:
raise ValueError(
"%d/%d %s hemisphere stc vertices "
"missing from the source space, likely "
"mismatch" % (n_missing, len(v), hemi)
)
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
"""Prepare indices and flips for extract_label_time_course."""
# If src is a mixed src space, the first 2 src spaces are surf type and
# the other ones are vol type. For mixed source space n_labels will be
# given by the number of ROIs of the cortical parcellation plus the number
# of vol src space.
# If stc=None (i.e. no activation time courses provided) and mode='mean',
# only computes vertex indices and label_flip will be list of None.
from .label import label_sign_flip, Label, BiHemiLabel
# if source estimate provided in stc, get vertices from source space and
# check that they are the same as in the stcs
_check_stc_src(stc, src)
vertno = [s["vertno"] for s in src] if stc is None else stc.vertices
nvert = [len(vn) for vn in vertno]
# initialization
label_flip = list()
label_vertidx = list()
bad_labels = list()
for li, label in enumerate(labels):
subject = label["subject"] if use_sparse else label.subject
# stc and src can each be None
_check_subject(
subject,
getattr(stc, "subject", None),
raise_error=False,
first_kind="label.subject",
second_kind="stc.subject",
)
_check_subject(
subject,
getattr(src, "_subject", None),
raise_error=False,
first_kind="label.subject",
second_kind="source space subject",
)
if use_sparse:
assert isinstance(label, dict)
vertidx = label["csr"]
# This can happen if some labels aren't present in the space
if vertidx.shape[0] == 0:
bad_labels.append(label["name"])
vertidx = None
# Efficiency shortcut: use linearity early to avoid redundant
# calculations
elif mode == "mean":
vertidx = sparse.csr_matrix(vertidx.mean(axis=0))
label_vertidx.append(vertidx)
label_flip.append(None)
continue
# standard case
_validate_type(label, (Label, BiHemiLabel), "labels[%d]" % (li,))
if label.hemi == "both":
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == "lh":
this_vertices = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertices)
elif slabel.hemi == "rh":
this_vertices = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
else:
raise ValueError("label %s has invalid hemi" % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
this_flip = None
if len(this_vertidx) == 0:
bad_labels.append(label.name)
this_vertidx = None # to later check if label is empty
elif mode not in ("mean", "max"): # mode-dependent initialization
# label_sign_flip uses two properties:
#
# - src[ii]['nn']
# - src[ii]['vertno']
#
# So if we override vertno with the stc vertices, it will pick
# the correct normals.
with _temporary_vertices(src, stc.vertices):
this_flip = label_sign_flip(label, src[:2])[:, None]
label_vertidx.append(this_vertidx)
label_flip.append(this_flip)
if len(bad_labels):
msg = "source space does not contain any vertices for %d label%s:\n%s" % (
len(bad_labels),
_pl(bad_labels),
bad_labels,
)
if not allow_empty:
raise ValueError(msg)
else:
msg += "\nAssigning all-zero time series."
if allow_empty == "ignore":
logger.info(msg)
else:
warn(msg)
return label_vertidx, label_flip
def _vol_src_rr(src):
return apply_trans(
src[0]["src_mri_t"],
np.array(
[
d.ravel(order="F")
for d in np.meshgrid(
*(np.arange(s) for s in src[0]["shape"]), indexing="ij"
)
],
float,
).T,
)
def _volume_labels(src, labels, mri_resolution):
# This will create Label objects that should do the right thing for our
# given volumetric source space when used with extract_label_time_course
from .label import Label
assert src.kind == "volume"
subject = src._subject
extra = " when using a volume source space"
_import_nibabel("use volume atlas labels")
_validate_type(labels, ("path-like", list, tuple), "labels" + extra)
if _path_like(labels):
mri = labels
infer_labels = True
else:
if len(labels) != 2:
raise ValueError(
"labels, if list or tuple, must have length 2, "
"got %s" % (len(labels),)
)
mri, labels = labels
infer_labels = False
_validate_type(mri, "path-like", "labels[0]" + extra)
logger.info("Reading atlas %s" % (mri,))
vol_info = _get_mri_info_data(str(mri), data=True)
atlas_data = vol_info["data"]
atlas_values = np.unique(atlas_data)
if atlas_values.dtype.kind == "f": # MGZ will be 'i'
atlas_values = atlas_values[np.isfinite(atlas_values)]
if not (atlas_values == np.round(atlas_values)).all():
raise RuntimeError(
"Non-integer values present in atlas, cannot " "labelize"
)
atlas_values = np.round(atlas_values).astype(np.int64)
if infer_labels:
labels = {
k: v for k, v in read_freesurfer_lut()[0].items() if v in atlas_values
}
labels = _check_volume_labels(labels, mri, name="labels[1]")
assert isinstance(labels, dict)
del atlas_values
vox_mri_t = vol_info["vox_mri_t"]
want = src[0].get("vox_mri_t", None)
if want is None:
raise RuntimeError(
"Cannot use volumetric atlas if no mri was supplied during "
"source space creation"
)
vox_mri_t, want = vox_mri_t["trans"], want["trans"]
if not np.allclose(vox_mri_t, want, atol=1e-6):
raise RuntimeError(
"atlas vox_mri_t does not match that used to create the source " "space"
)
src_shape = tuple(src[0]["mri_" + k] for k in ("width", "height", "depth"))
atlas_shape = atlas_data.shape
if atlas_shape != src_shape:
raise RuntimeError(
"atlas shape %s does not match source space MRI "
"shape %s" % (atlas_shape, src_shape)
)
atlas_data = atlas_data.ravel(order="F")
if mri_resolution:
# Upsample then just index
out_labels = list()
nnz = 0
interp = src[0]["interpolator"]
# should be guaranteed by size checks above and our src interp code
assert interp.shape[0] == np.prod(src_shape)
assert interp.shape == (atlas_data.size, len(src[0]["rr"]))
interp = interp[:, src[0]["vertno"]]
for k, v in labels.items():
mask = atlas_data == v
csr = interp[mask]
out_labels.append(dict(csr=csr, name=k, subject=subject))
nnz += csr.shape[0] > 0
else:
# Use nearest values
vertno = src[0]["vertno"]
rr = _vol_src_rr(src)
del src
src_values = _get_atlas_values(vol_info, rr[vertno])
vertices = [vertno[src_values == val] for val in labels.values()]
out_labels = [
Label(v, hemi="lh", name=val, subject=subject)
for v, val in zip(vertices, labels.keys())
]
nnz = sum(len(v) != 0 for v in vertices)
logger.info(
"%d/%d atlas regions had at least one vertex "
"in the source space" % (nnz, len(out_labels))
)
return out_labels
def _get_default_label_modes():
return sorted(_label_funcs.keys()) + ["auto"]
def _get_allowed_label_modes(stc):
if isinstance(stc, (_BaseVolSourceEstimate, _BaseVectorSourceEstimate)):
return ("mean", "max", "auto")
else:
return _get_default_label_modes()
def _gen_extract_label_time_course(
stcs,
labels,
src,
*,
mode="mean",
allow_empty=False,
mri_resolution=True,
verbose=None,
):
# loop through source estimates and extract time series
if src is None and mode in ["mean", "max"]:
kind = "surface"
else:
_validate_type(src, SourceSpaces)
kind = src.kind
_check_option("mode", mode, _get_default_label_modes())
if kind in ("surface", "mixed"):
if not isinstance(labels, list):
labels = [labels]
use_sparse = False
else:
labels = _volume_labels(src, labels, mri_resolution)
use_sparse = bool(mri_resolution)
n_mode = len(labels) # how many processed with the given mode
n_mean = len(src[2:]) if kind == "mixed" else 0
n_labels = n_mode + n_mean
vertno = func = None
for si, stc in enumerate(stcs):
_validate_type(stc, _BaseSourceEstimate, "stcs[%d]" % (si,), "source estimate")
_check_option(
"mode",
mode,
_get_allowed_label_modes(stc),
"when using a vector and/or volume source estimate",
)
if isinstance(stc, (_BaseVolSourceEstimate, _BaseVectorSourceEstimate)):
mode = "mean" if mode == "auto" else mode
else:
mode = "mean_flip" if mode == "auto" else mode
if vertno is None:
vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref
nvert = np.array([len(v) for v in vertno])
label_vertidx, src_flip = _prepare_label_extraction(
stc, labels, src, mode, allow_empty, use_sparse
)
func = _label_funcs[mode]
# make sure the stc is compatible with the source space
if len(vertno) != len(stc.vertices):
raise ValueError("stc not compatible with source space")
for vn, svn in zip(vertno, stc.vertices):
if len(vn) != len(svn):
raise ValueError(
"stc not compatible with source space. "
"stc has %s time series but there are %s "
"vertices in source space. Ensure you used "
"src from the forward or inverse operator, "
"as forward computation can exclude vertices." % (len(svn), len(vn))
)
if not np.array_equal(svn, vn):
raise ValueError("stc not compatible with source space")
logger.info(
"Extracting time courses for %d labels (mode: %s)" % (n_labels, mode)
)
# do the extraction
label_tc = np.zeros((n_labels,) + stc.data.shape[1:], dtype=stc.data.dtype)
for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
if vertidx is not None:
if isinstance(vertidx, sparse.csr_matrix):
assert mri_resolution
assert vertidx.shape[1] == stc.data.shape[0]
this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
this_data = vertidx @ this_data
this_data.shape = (this_data.shape[0],) + stc.data.shape[1:]
else:
this_data = stc.data[vertidx]
label_tc[i] = func(flip, this_data)
# extract label time series for the vol src space (only mean supported)
offset = nvert[:-n_mean].sum() # effectively :2 or :0
for i, nv in enumerate(nvert[2:]):
if nv != 0:
v2 = offset + nv
label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
offset = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(
stcs,
labels,
src,
mode="auto",
allow_empty=False,
return_generator=False,
*,
mri_resolution=True,
verbose=None,
):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter (see Notes).
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
%(labels_eltc)s
%(src_eltc)s
%(mode_eltc)s
%(allow_empty_eltc)s
return_generator : bool
If True, a generator instead of a list is returned.
%(mri_resolution_eltc)s
%(verbose)s
Returns
-------
%(label_tc_el_returns)s
Notes
-----
%(eltc_mode_notes)s
If encountering a ``ValueError`` due to mismatch between number of
source points in the subject source space and computed ``stc`` object set
``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
space is the one actually used by the inverse to compute the source
time courses.
"""
# convert inputs to lists
if not isinstance(stcs, (list, tuple, GeneratorType)):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
label_tc = _gen_extract_label_time_course(
stcs,
labels,
src,
mode=mode,
allow_empty=allow_empty,
mri_resolution=mri_resolution,
)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
# input was a single SourceEstimate, return a single array
label_tc = label_tc[0]
return label_tc
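# Illustrative sketch (``stcs`` is an assumed list of SourceEstimate objects,
# ``labels`` a cortical parcellation, and ``src`` the inverse-operator source
# space): one sign-flipped mean time course per label.
#
#     label_tc = extract_label_time_course(stcs, labels, src, mode="mean_flip")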
@verbose
def stc_near_sensors(
evoked,
trans,
subject,
distance=0.01,
mode="sum",
project=True,
subjects_dir=None,
src=None,
picks=None,
surface="pial",
verbose=None,
):
"""Create a STC from ECoG, sEEG and DBS sensor data.
Parameters
----------
evoked : instance of Evoked
The evoked data. Must contain ECoG, sEEG or DBS channels.
%(trans)s
.. versionchanged:: 0.19
Support for 'fsaverage' argument.
subject : str
The subject name.
distance : float
Distance (m) defining the activation "ball" of the sensor.
mode : str
Can be ``"sum"`` to do a linear sum of weights, ``"weighted"`` to make
this a weighted sum, ``"nearest"`` to use only the weight of the
nearest sensor, or ``"single"`` to do a distance-weight of the nearest
sensor. Default is ``"sum"``. See Notes.
.. versionchanged:: 0.24
Added "weighted" option.
project : bool
If True, project the sensors to the nearest ``'pial'`` surface
vertex before computing distances. Only used when doing a
surface projection.
%(subjects_dir)s
src : instance of SourceSpaces
The source space.
.. warning:: If a surface source space is used, make sure that
``surface='pial'`` was used during construction,
or that you set ``surface='pial'`` here.
%(picks_base)s good sEEG, ECoG, and DBS channels.
.. versionadded:: 0.24
surface : str | None
The surface to use if ``src=None``. Default is the pial surface.
If None, the source space surface will be used.
.. versionadded:: 0.24.1
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The surface source estimate. If src is None, a surface source
estimate will be produced, and the number of vertices will equal
the number of pial-surface vertices that were close enough to
the sensors to take on a non-zero value. If src is not None,
a surface, volume, or mixed source estimate will be produced
(depending on the kind of source space passed) and the
vertices will match those of src (i.e., there may be many
all-zero values in stc.data).
Notes
-----
For surface projections, this function projects the ECoG sensors to
the pial surface (if ``project``), then the activation at each pial
surface vertex is given by the mode:
- ``'sum'``
Activation is the sum across each sensor weighted by the fractional
``distance`` from each sensor. A sensor with zero distance gets weight
1 and a sensor at ``distance`` meters away (or larger) gets weight 0.
If ``distance`` is less than half the distance between any two
sensors, this will be the same as ``'single'``.
- ``'single'``
Same as ``'sum'`` except that only the nearest sensor is used,
rather than summing across sensors within the ``distance`` radius.
As ``'nearest'`` for vertices with distance zero to the projected
sensor.
- ``'nearest'``
The value is given by the value of the nearest sensor, up to a
``distance`` (beyond which it is zero).
- ``'weighted'``
The value is computed the same way as ``'sum'``, but the total weight
for each vertex is 1 (i.e., it is a weighted sum based on proximity).
If creating a Volume STC, ``src`` must be passed in, and this
function will project sEEG and DBS sensors to nearby surrounding vertices.
Then the activation at each volume vertex is given by the mode
in the same way as ECoG surface projections.
.. versionadded:: 0.22
"""
from .evoked import Evoked
_validate_type(evoked, Evoked, "evoked")
_validate_type(mode, str, "mode")
_validate_type(src, (None, SourceSpaces), "src")
_check_option("mode", mode, ("sum", "single", "nearest", "weighted"))
# create a copy of Evoked using ecog, seeg and dbs
if picks is None:
picks = pick_types(evoked.info, ecog=True, seeg=True, dbs=True)
evoked = evoked.copy().pick(picks)
frames = set(evoked.info["chs"][pick]["coord_frame"] for pick in picks)
if not frames == {FIFF.FIFFV_COORD_HEAD}:
raise RuntimeError(
"Channels must be in the head coordinate frame, " f"got {sorted(frames)}"
)
# get channel positions that will be used to pinpoint where
# in the Source space we will use the evoked data
pos = evoked._get_channel_positions()
# remove nan channels
nan_inds = np.where(np.isnan(pos).any(axis=1))[0]
nan_chs = [evoked.ch_names[idx] for idx in nan_inds]
if len(nan_chs):
evoked.drop_channels(nan_chs)
pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]
# coord_frame transformation from native mne "head" to MRI coord_frame
trans, _ = _get_trans(trans, "head", "mri", allow_none=True)
# convert head positions -> coord_frame MRI
pos = apply_trans(trans, pos)
subject = _check_subject(None, subject, raise_error=False)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if surface is not None:
surf_rr = [
read_surface(subjects_dir / subject / "surf" / f"{hemi}.{surface}")[0]
/ 1000.0
for hemi in ("lh", "rh")
]
if src is None: # fake a full surface one
_validate_type(surface, str, "surface", "when src is None")
src = SourceSpaces(
[
dict(
rr=rr,
vertno=np.arange(len(rr)),
type="surf",
coord_frame=FIFF.FIFFV_COORD_MRI,
)
for rr in surf_rr
]
)
rrs = np.concatenate([s_rr[s["vertno"]] for s_rr, s in zip(surf_rr, src)])
keep_all = False
else:
if surface is None:
rrs = np.concatenate([s["rr"][s["vertno"]] for s in src])
if src[0]["coord_frame"] == FIFF.FIFFV_COORD_HEAD:
rrs = apply_trans(trans, rrs)
else:
rrs = np.concatenate([s_rr[s["vertno"]] for s_rr, s in zip(surf_rr, src)])
keep_all = True
# ensure it's a usable one
klass = dict(
surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate,
)
_check_option("src.kind", src.kind, sorted(klass.keys()))
klass = klass[src.kind]
# projection will only occur with surfaces
logger.info(
f"Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} "
f"{src.kind} vertices: {mode} mode"
)
if project and src.kind == "surface":
logger.info(" Projecting sensors onto surface")
pos = _project_onto_surface(
pos, dict(rr=rrs), project_rrs=True, method="nearest"
)[2]
min_dist = pdist(pos).min() * 1000
logger.info(
f' Minimum {"projected " if project else ""}intra-sensor distance: '
f"{min_dist:0.1f} mm"
)
# compute pairwise distance between source space points and sensors
dists = cdist(rrs, pos)
assert dists.shape == (len(rrs), len(pos))
# only consider vertices within our "epsilon-ball"
# characterized by distance kwarg
vertices = np.where((dists <= distance).any(-1))[0]
logger.info(f" {len(vertices)} / {len(rrs)} non-zero vertices")
w = np.maximum(1.0 - dists[vertices] / distance, 0)
# now we triage based on mode
if mode in ("single", "nearest"):
range_ = np.arange(w.shape[0])
idx = np.argmax(w, axis=1)
vals = w[range_, idx] if mode == "single" else 1.0
w.fill(0)
w[range_, idx] = vals
elif mode == "weighted":
norms = w.sum(-1, keepdims=True)
norms[norms == 0] = 1.0
w /= norms
missing = np.where(~np.any(w, axis=0))[0]
if len(missing):
warn(
f"Channel{_pl(missing)} missing in STC: "
f'{", ".join(evoked.ch_names[mi] for mi in missing)}'
)
nz_data = w @ evoked.data
if keep_all:
data = np.zeros(
(sum(len(s["vertno"]) for s in src), len(evoked.times)), dtype=nz_data.dtype
)
data[vertices] = nz_data
vertices = [s["vertno"].copy() for s in src]
else:
assert src.kind == "surface"
data = nz_data
offset = len(src[0]["vertno"])
vertices = [vertices[vertices < offset], vertices[vertices >= offset] - offset]
return klass(
data,
vertices,
evoked.times[0],
1.0 / evoked.info["sfreq"],
subject=subject,
verbose=verbose,
)
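# --- Editor's illustrative sketch (not part of the original source) ----------
# The four ``mode`` options documented above all reduce to a transform of the
# vertex-to-sensor distance matrix. A self-contained NumPy sketch of that
# weighting, mirroring the logic in the function body (names are hypothetical):
def _example_mode_weights():
    import numpy as np
    distance = 0.01                     # radius of the "epsilon ball", meters
    dists = np.array([[0.000, 0.004],   # 2 vertices x 2 sensors
                      [0.009, 0.020]])
    w = np.maximum(1.0 - dists / distance, 0)  # 'sum': linear falloff to zero
    # 'weighted': same weights, but each vertex's total weight is normalized to 1
    norms = w.sum(-1, keepdims=True)
    norms[norms == 0] = 1.0
    w_weighted = w / norms
    # 'single' / 'nearest': keep only the closest sensor per vertex
    # ('single' keeps its distance weight, 'nearest' uses weight 1)
    range_ = np.arange(w.shape[0])
    idx = np.argmax(w, axis=1)
    w_single = np.zeros_like(w)
    w_single[range_, idx] = w[range_, idx]
    return w, w_weighted, w_single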
|
c5b7db9f833eb1e427fa18befaa014d46efcbd0d
|
498526dff7df92662c7bd59a7b9fe4e5a4999522
|
/examples/qualifier_overloading/qualifier_overloading_example.py
|
2ad3efb46fc86b5b95159777a06f0f35d95b65a9
|
[
"MIT"
] |
permissive
|
allrod5/injectable
|
eebf6ba54c385ece22982fd9f266a8a036d1bdf7
|
7cbd7bbf935eb9e62a9c1f34c58769c7e42217e8
|
refs/heads/master
| 2023-01-28T22:58:06.314991
| 2023-01-11T18:03:57
| 2023-01-11T18:03:57
| 120,220,195
| 101
| 13
|
MIT
| 2021-08-15T22:44:36
| 2018-02-04T20:26:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,109
|
py
|
qualifier_overloading_example.py
|
"""
In this example you'll learn about overloading qualifiers/classes for injection and how
to take advantage of that to inject multiple dependencies as a list of instances.
Overloading happens when two or more injectables are declared for the same qualifier
or class.
In this example we create an abstract base class ``SenderService`` and implement it in
three other classes, ``EmailSenderService``, ``SmsSenderService``, and
``FaxSenderService``. All three concrete services are declared as injectables, and
since an injectable declaration on a class propagates to its base classes, we end up
with three injectables declared for the ``SenderService`` class.
In our ``QualifierOverloading`` example class we inject a list with all injectables
declared for the ``SenderService`` by using the :class:`typing.List` type. We also use
the ``exclude_groups`` parameter to filter out injectables that were declared with the
``"old"`` group label.
.. seealso::
The :ref:`dependencies_precedence_example` shows how dependency resolution works
with regard to precedence when a qualifier or class is resolved by multiple
injectables and you're injecting a single instance rather than all matching injectables.
"""
# sphinx-start
from typing import List
from examples import Example
from examples.qualifier_overloading.sender_service import SenderService
from injectable import autowired, Autowired, load_injection_container
class QualifierOverloading(Example):
@autowired
def __init__(
self,
sender_services: Autowired(List[SenderService], exclude_groups=["old"]),
):
self.sender_services = sender_services
def send_message(self, message: str, recipient: str):
for sender_service in self.sender_services:
sender_service.send(message, recipient)
def run(self):
self.send_message(message="Hello!", recipient="World")
# Sending Email to World: Hello!
# Sending SMS to World: Hello!
def run_example():
load_injection_container()
example = QualifierOverloading()
example.run()
if __name__ == "__main__":
run_example()
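# --- Editor's illustrative sketch (not part of the original example) ---------
# A hypothetical version of the companion ``sender_service`` module, showing how
# the concrete services could be declared; the ``group="old"`` label is what the
# ``exclude_groups=["old"]`` filter above would strip out (the class bodies below
# are assumptions, only the interface is taken from this example):
#
#     from abc import ABC, abstractmethod
#     from injectable import injectable
#
#     class SenderService(ABC):
#         @abstractmethod
#         def send(self, message: str, recipient: str): ...
#
#     @injectable
#     class EmailSenderService(SenderService):
#         def send(self, message, recipient):
#             print(f"Sending Email to {recipient}: {message}")
#
#     @injectable(group="old")
#     class FaxSenderService(SenderService):
#         def send(self, message, recipient):
#             print(f"Sending Fax to {recipient}: {message}")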
|
f653cbdd5b13d656930a0e014933cf5f7fc448f4
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/native_client/src/untrusted/DEPS
|
b9d27310a64238b4de06a61907bb7b737c8c56df
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 494
|
DEPS
|
include_rules = [
"+native_client/src/public",
"+native_client/src/shared",
"+native_client/src/untrusted",
"+native_client/src/third_party/dlmalloc",
"+native_client/src/third_party/valgrind",
# For nacl/syscall_bindings_trampoline.h and a couple of tests:
"+native_client/src/trusted/service_runtime/include/bits/nacl_syscalls.h",
"+native_client/src/trusted/service_runtime/nacl_config.h",
# For various tests:
"+native_client/src/trusted/service_runtime/include/sys",
]
|
|
ba83db5aca7d31b49ac4d627070bb69540ad3323
|
e07eed9bfcc0d93406610e0df4a95385cd173c01
|
/gmond/python_modules/db/riak.py
|
908b480be7295273db03f3020c16ee46a7272ba2
|
[] |
permissive
|
ganglia/monitor-core
|
d0c3ed5fe89cfd75c9ac56bbbd1b477e759e719c
|
185ab6b3425d391727a19c380691c4ee42fdff69
|
refs/heads/master
| 2023-08-27T09:34:31.653118
| 2020-12-18T10:29:37
| 2021-11-21T04:48:08
| 2,018,548
| 378
| 183
|
BSD-3-Clause
| 2021-11-21T22:29:14
| 2011-07-08T15:48:34
|
C
|
UTF-8
|
Python
| false
| false
| 31,818
|
py
|
riak.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# [Riak](https://wiki.basho.com/display/RIAK/Riak) is a Dynamo-inspired
# key/value store.
#
# This module collects metrics from the JSON stats interface of riak, available
# at http://localhost:8098/stats. The statistics-aggregator must be enabled in
# your riak configuration for this to work:
# {riak_kv, [
# %% ...
# {riak_kv_stat, true},
# %% ...
# ]},
#
# You'll want to edit the url key in riak.conf to point at the interface your
# riak install is listening on:
#
# param url {
# value = "http://10.0.1.123:8098/stats"
# }
import os
import sys
import threading
import time
import urllib2
import traceback
import json
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
if Debug:
print >>sys.stderr, "DEBUG: " + f % v
def floatable(str):
try:
float(str)
return True
except:
return False
class UpdateMetricThread(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.refresh_rate = 30
if "refresh_rate" in params:
self.refresh_rate = int(params["refresh_rate"])
self.metric = {}
self.timeout = 10
self.url = "http://localhost:8098/stats"
if "url" in params:
self.url = params["url"]
self.mp = params["metrix_prefix"]
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
self.join()
def run(self):
self.running = True
while not self.shuttingdown:
_Lock.acquire()
self.update_metric()
_Lock.release()
time.sleep(self.refresh_rate)
self.running = False
def update_metric(self):
try:
req = urllib2.Request(url=self.url)
res = urllib2.urlopen(req)
stats = res.read()
dprint("%s", stats)
json_stats = json.loads(stats)
for (key, value) in json_stats.iteritems():
dprint("%s = %s", key, value)
if value == 'undefined':
self.metric[self.mp + '_' + key] = 0
else:
self.metric[self.mp + '_' + key] = value
except urllib2.URLError:
traceback.print_exc()
else:
res.close()
def metric_of(self, name):
val = 0
mp = name.split("_")[0]
if name in self.metric:
_Lock.acquire()
val = self.metric[name]
_Lock.release()
return val
def metric_init(params):
global descriptors, Desc_Skel, _Worker_Thread, Debug
if "metrix_prefix" not in params:
params["metrix_prefix"] = "riak"
print params
# initialize skeleton of descriptors
Desc_Skel = {
'name' : 'XXX',
'call_back' : metric_of,
'time_max' : 60,
'value_type' : 'uint',
'format' : '%u',
'units' : 'XXX',
'slope' : 'XXX', # zero|positive|negative|both
'description' : 'XXX',
'groups' : 'riak',
}
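# Each descriptor below starts from this skeleton; create_desc() (defined near
# the end of this file) copies Desc_Skel and overrides only the fields given
# per metric (typically "name", "slope", "units" and "description").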
if "refresh_rate" not in params:
params["refresh_rate"] = 15
if "debug" in params:
Debug = params["debug"]
dprint("%s", "Debug mode on")
_Worker_Thread = UpdateMetricThread(params)
_Worker_Thread.start()
# IP:HOSTNAME
if "spoof_host" in params:
Desc_Skel["spoof_host"] = params["spoof_host"]
mp = params["metrix_prefix"]
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_node_get_fsm_siblings_100",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_node_get_fsm_siblings_95",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_node_get_fsm_siblings_99",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_node_get_fsm_siblings_mean",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_node_get_fsm_siblings_median",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_executing_mappers",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_active",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_active_60s",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_in_rate",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_active_60s",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_in_rate",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_out_rate",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pipeline_active",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pipeline_create_count",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pipeline_create_error_count",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pipeline_create_error_one",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pipeline_create_one",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_index_fsm_active",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_index_fsm_create",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_index_fsm_create_error",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_list_fsm_active",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_list_fsm_create",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_list_fsm_create_error",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_out_rate",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_rejected",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_rejected_60s",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_siblings_100",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_active",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_rejected",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_rejected_60s",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_read_repairs_primary_notfound_count",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_read_repairs_primary_notfound_one",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_read_repairs_primary_outofdate_count",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_read_repairs_primary_outofdate_one",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rebalance_delay_mean",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rejected_handoffs",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_core_stat_ts",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_stat_ts",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_stat_ts",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_vnodeq_mean",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rings_reconciled",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_refreshes",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_puts",
"units": "puts",
"description": mp + "_node_puts"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_logical_processors",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_logical_processors"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_ignored_gossip_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_ignored_gossip_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_gets",
"units": "gets",
"description": mp + "_vnode_gets"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_objsize_100",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_objsize_100"
}))
descriptors.append(create_desc(Desc_Skel, {
"name": mp + "_node_put_fsm_time_mean",
"units": "microseconds",
"slope": "both",
"description": mp + "_node_put_fsm_time_mean",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_node_puts_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_puts_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_vnodeq_mean",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodeq_mean"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_vnodes_running",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_pipe_vnodes_running"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_writes_postings",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_writes_postings"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_index_deletes_postings_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_deletes_postings_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_read_repairs_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_read_repairs_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_precommit_fail",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_precommit_fail"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rebalance_delay_max",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_rebalance_delay_max"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_converge_delay_mean",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_converge_delay_mean"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_time_95",
"units": "microseconds",
"description": mp + "_node_get_fsm_time_95"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_vnodeq_median",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_pipe_vnodeq_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_time_99",
"value_type": "uint",
"units": "microseconds",
"description": mp + "_node_get_fsm_time_99"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_code",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_code"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_smp_support",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_smp_support"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_processes_used",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_processes_used"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pbc_active",
"units": "connections",
"description": mp + "_pbc_active"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_index_writes_postings_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_writes_postings_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_gets_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_gets_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_process_count",
"units": "processes",
"description": "Erlang processes"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_read_repairs",
"units": "repairs",
"description": mp + "_read_repairs"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_puts",
"units": "puts",
"description": mp + "_vnode_puts"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_search_vnodes_running",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodes_running"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_vnodeq_min",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_pipe_vnodeq_min"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_objsize_mean",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_objsize_mean"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_cpu_nprocs",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_cpu_nprocs"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_time_100",
"units": "microseconds",
"description": mp + "_node_put_fsm_time_100"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_mem_total",
"format": "%.1f",
"value_type": "float",
"units": "bytes",
"description": mp + "_mem_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_search_vnodeq_mean",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodeq_mean"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_siblings_median",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_siblings_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_search_vnodeq_max",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodeq_max"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_threads_enabled",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_threads_enabled"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_writes",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_writes"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_memory_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_pipe_vnodeq_max",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_pipe_vnodeq_max"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_deletes_postings",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_deletes_postings"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_time_mean",
"units": "microseconds",
"description": "Mean for riak_kv_get_fsm calls"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_ring_num_partitions",
"units": "vnodes",
"description": mp + "_ring_num_partitions"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_search_vnodeq_min",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodeq_min"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_index_reads_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_reads_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_siblings_99",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_siblings_99"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_mem_allocated",
"format": "%.1f",
"value_type": "float",
"units": "bytes",
"description": mp + "_mem_allocated"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_siblings_95",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_siblings_95"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_objsize_95",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_objsize_95"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_time_median",
"units": "microseconds",
"description": mp + "_node_put_fsm_time_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_processes",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_processes"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_riak_search_vnodeq_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodeq_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_objsize_99",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_objsize_99"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_system",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_system"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_global_heaps_size",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_global_heaps_size"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_gossip_received",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_gossip_received"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_time_median",
"units": "microseconds",
"description": mp + "_node_get_fsm_time_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_index_deletes_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_deletes_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_vnodeq_max",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodeq_max"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_vnodeq_min",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodeq_min"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_gets",
"units": "gets",
"description": mp + "_node_gets"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_vnodeq_median",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodeq_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_postcommit_fail",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_postcommit_fail"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_search_vnodeq_median",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_search_vnodeq_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_wordsize",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_wordsize"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_riak_kv_vnodes_running",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodes_running"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_time_95",
"units": "microseconds",
"description": mp + "_node_put_fsm_time_95",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_rings_reconciled_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_rings_reconciled_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rebalance_delay_min",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_rebalance_delay_min"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_put_fsm_time_99",
"units": "microseconds",
"description": mp + "_node_put_fsm_time_99",
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_ring_creation_size",
"units": "vnodes",
"description": mp + "_ring_creation_size"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_cpu_avg1",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_cpu_avg1"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_cpu_avg5",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_cpu_avg5"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_pbc_connects_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_pbc_connects_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_binary",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_binary"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_coord_redirs_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_coord_redirs_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_riak_pipe_vnodeq_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_pipe_vnodeq_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_node_gets_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_gets_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_vnode_puts_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_puts_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_memory_atom",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_memory_atom"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_cpu_avg15",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_cpu_avg15"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_objsize_median",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_objsize_median"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_siblings_mean",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_node_get_fsm_siblings_mean"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_converge_delay_min",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_converge_delay_min"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_node_get_fsm_time_100",
"units": "microseconds",
"description": mp + "_node_get_fsm_time_100"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_converge_delay_max",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_converge_delay_max"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_converge_delay_last",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_converge_delay_last"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_rebalance_delay_last",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_rebalance_delay_last"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_deletes",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_deletes"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "positive",
"name": mp + "_riak_kv_vnodeq_total",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_riak_kv_vnodeq_total"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_handoff_timeouts",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_handoff_timeouts"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_vnode_index_reads",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_vnode_index_reads"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_sys_thread_pool_size",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_sys_thread_pool_size"
}))
descriptors.append(create_desc(Desc_Skel, {
"slope": "both",
"name": mp + "_pbc_connects",
"format": "%u",
"value_type": "uint",
"units": "N",
"description": mp + "_pbc_connects"
}))
return descriptors
def create_desc(skel, prop):
d = skel.copy()
for k, v in prop.iteritems():
d[k] = v
return d
def metric_of(name):
return _Worker_Thread.metric_of(name)
def metric_cleanup():
_Worker_Thread.shutdown()
if __name__ == '__main__':
try:
params = {
"debug": True,
}
metric_init(params)
while True:
for d in descriptors:
v = d['call_back'](d['name'])
print ('value for %s is ' + d['format']) % (d['name'], v)
time.sleep(5)
except KeyboardInterrupt:
time.sleep(0.2)
os._exit(1)
except:
traceback.print_exc()
os._exit(1)
|
ae37b8f0d431480f13946a1d2f6d05d77e928b6e
|
754128745da0c3231fa46afb7873114a6c7831cd
|
/puput/migrations/0001_initial.py
|
a3ef8c32e9a385ecb62de7760f5720bffd937c8d
|
[
"MIT"
] |
permissive
|
APSL/puput
|
e64979df5ca821a6d7670ae0524abd3f9253bb94
|
da579fe57b0aa423ded56d683b0cb3ee2b536f6f
|
refs/heads/master
| 2023-08-27T22:14:51.207609
| 2023-07-04T07:24:28
| 2023-07-04T07:24:28
| 39,860,842
| 635
| 235
|
MIT
| 2023-07-11T15:48:12
| 2015-07-28T22:30:09
|
Python
|
UTF-8
|
Python
| false
| false
| 6,993
|
py
|
0001_initial.py
|
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.fields
import puput.routes
import datetime
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
('wagtailimages', '0006_add_verbose_names'),
]
operations = [
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.CASCADE)),
('description', models.CharField(help_text='The page description that will appear under the title.', max_length=255, verbose_name='Description', blank=True)),
('display_comments', models.BooleanField(default=False, verbose_name='Display comments')),
('display_categories', models.BooleanField(default=True, verbose_name='Display categories')),
('display_tags', models.BooleanField(default=True, verbose_name='Display tags')),
('display_popular_entries', models.BooleanField(default=True, verbose_name='Display popular entries')),
('display_last_entries', models.BooleanField(default=True, verbose_name='Display last entries')),
('display_archive', models.BooleanField(default=True, verbose_name='Display archive')),
('disqus_api_secret', models.TextField(blank=True)),
('disqus_shortname', models.CharField(max_length=128, blank=True)),
('num_entries_page', models.IntegerField(default=5, verbose_name='Entries per page')),
('num_last_entries', models.IntegerField(default=3, verbose_name='Last entries limit')),
('num_popular_entries', models.IntegerField(default=3, verbose_name='Popular entries limit')),
('num_tags_entry_header', models.IntegerField(default=5, verbose_name='Tags limit entry header')),
('header_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Header image', blank=True, to='wagtailimages.Image', null=True)),
],
options={
'verbose_name': 'Blog',
},
bases=(puput.routes.BlogRoutes, 'wagtailcore.page'),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80, verbose_name='Category Name')),
('slug', models.SlugField(unique=True, max_length=80)),
('description', models.CharField(max_length=500, blank=True)),
('parent', models.ForeignKey(related_name='children', blank=True, to='puput.Category', null=True, on_delete=models.SET_NULL)),
],
options={
'ordering': ['name'],
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CategoryEntryPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.ForeignKey(related_name='+', verbose_name='Category', to='puput.Category', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EntryPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.CASCADE)),
('body', wagtail.fields.RichTextField(verbose_name='body')),
('date', models.DateTimeField(default=datetime.datetime.today, verbose_name='Post date')),
('excerpt', wagtail.fields.RichTextField(help_text='Used to display on puput pages list. If this field is not filled, a truncate version of body text will be used.', verbose_name='excerpt', blank=True)),
('num_comments', models.IntegerField(default=0, editable=False)),
('categories', models.ManyToManyField(to='puput.Category', through='puput.CategoryEntryPage', blank=True)),
('header_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Header image', blank=True, to='wagtailimages.Image', null=True)),
],
options={
'verbose_name': 'Entry',
'verbose_name_plural': 'Entries',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EntryPageRelated',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entrypage_from', modelcluster.fields.ParentalKey(related_name='related_entrypage_from', verbose_name='Entry', to='puput.EntryPage')),
('entrypage_to', modelcluster.fields.ParentalKey(related_name='related_entrypage_to', verbose_name='Entry', to='puput.EntryPage')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TagEntryPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name='entry_tags', to='puput.EntryPage')),
('tag', models.ForeignKey(related_name='puput_tagentrypage_items', to='taggit.Tag', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='entrypage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='puput.TagEntryPage', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
migrations.AddField(
model_name='categoryentrypage',
name='page',
field=modelcluster.fields.ParentalKey(related_name='entry_categories', to='puput.EntryPage'),
preserve_default=True,
),
migrations.CreateModel(
name='Tag',
fields=[
],
options={
'proxy': True,
},
bases=('taggit.tag',),
),
]
|
9a2f216a8cba39cadfbd380cde192d463eb719bb
|
57767ccd77d484ea60001f28d90270d1e34ac974
|
/glance/common/location_strategy/location_order.py
|
c044bc6250bc5b1f11bff5549bac95f944c82260
|
[
"Apache-2.0"
] |
permissive
|
openstack/glance
|
ff459174fb502ac9b5030ab2d1aafc2fa5e40475
|
11af8f0ed5fcd53ab3865a40ae50e467a0c06e6c
|
refs/heads/master
| 2023-09-04T06:55:03.257371
| 2023-09-02T03:13:30
| 2023-09-02T03:13:30
| 2,155,157
| 389
| 498
|
Apache-2.0
| 2020-12-15T05:36:34
| 2011-08-04T15:05:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
location_order.py
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image location order based location strategy module"""
def get_strategy_name():
"""Return strategy module name."""
return 'location_order'
def init():
"""Initialize strategy module."""
pass
def get_ordered_locations(locations, **kwargs):
"""
Order image location list.
:param locations: The original image location list.
:returns: The image location list with original natural order.
"""
return locations
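# --- Editor's illustrative sketch (not part of the original module) ----------
# Every location strategy module exposes this same three-callable interface; a
# hypothetical strategy that simply reverses the natural order would look like:
#
#     def get_strategy_name():
#         return 'reverse_order'
#
#     def init():
#         pass
#
#     def get_ordered_locations(locations, **kwargs):
#         return list(reversed(locations))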
|
41c79ef880acc6e0b96f700288a77900c74e44fc
|
d1062421aed9448f583d1a084a073f12a5ac74e2
|
/sequana/modules_report/bamqc.py
|
8f15f786c32841bc0f490f3aae6adf2928b0f90d
|
[
"BSD-3-Clause"
] |
permissive
|
sequana/sequana
|
9d89cea55cf6987b832351ac35d34742620ce64a
|
8717094493d1993debd079f324c540541dece70f
|
refs/heads/main
| 2023-08-01T04:02:27.864027
| 2023-07-12T13:12:37
| 2023-07-12T13:12:37
| 53,329,678
| 155
| 41
|
BSD-3-Clause
| 2023-09-13T12:39:01
| 2016-03-07T14:00:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,062
|
py
|
bamqc.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
# Rachel Legendre <rachel.legendre@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Report dedicated to BAM file
.. autosummary::
BAMQCModule
"""
import os
from sequana.lazy import pandas as pd
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.bamtools import SAMFlags
from sequana import BAM
from sequana.lazy import pylab
from sequana.utils.datatables_js import DataTable
__all__ = ["BAMQCModule"]
class BAMQCModule(SequanaBaseModule):
"""Report dedicated to BAM file
::
from sequana import sequana_data
from sequana.modules_report.bamqc import BAMQCModule
filename = sequana_data("test.bam")
r = BAMQCModule(filename)
r.create_html("test.html")
# report/bam.html is now available
.. todo:: right now, the computation is performed in the class. Ideally,
we would like the computation to happen elsewhere, where a json is stored.
The json would be the input to this class.
"""
def __init__(self, bam_input, output_filename=None):
super().__init__()
self.bam_input = bam_input
self.title = "Bam Report"
self.create_report_content()
self.create_html(output_filename)
def create_report_content(self):
self.sections = list()
self.add_flag_section()
self.add_images_section()
def _computation(self):
self.bam = BAM(self.bam_input)
results = {}
results["alignment_count"] = len(self.bam)
# first, we store the flags
df = self.bam.get_flags_as_df().sum()
df = df.to_frame()
df.columns = ["counter"]
sf = SAMFlags()
df["meaning"] = sf.get_meaning()
df = df[["meaning", "counter"]]
results["flags"] = df
return results
self.bam.plot_bar_flags(
logy=False, filename=self.directory + os.sep + "bar_flags.png"
)
self.bam.plot_bar_mapq(filename=self.directory + os.sep + "bar_mapq.png")
def add_flag_section(self):
data = self._computation()
df = data["flags"]
datatable = DataTable(df, "flags", index=True)
datatable.datatable.datatable_options = {
"scrollX": "300px",
"pageLength": 15,
"scrollCollapse": "true",
"dom": "tB",
"paging": "false",
"buttons": ["copy", "csv"],
}
js = datatable.create_javascript_function()
html_tab = datatable.create_datatable(float_format="%.3g")
html = ""
html += "{} {}".format(html_tab, js)
self.sections.append(
{"name": "Flags information", "anchor": "flags", "content": html}
)
def add_images_section(self):
style = "width:65%"
import pylab
pylab.ioff()
def plotter1(filename):
self.bam.plot_bar_flags(logy=True, filename=filename)
html1 = self.create_embedded_png(plotter1, "filename", style=style)
def plotter2(filename):
self.bam.plot_bar_flags(logy=False, filename=filename)
html2 = self.create_embedded_png(plotter2, "filename", style=style)
def plotter3(filename):
self.bam.plot_bar_mapq(filename=filename)
html3 = self.create_embedded_png(plotter3, "filename", style=style)
self.sections.append(
{"name": "Image", "anchor": "table", "content": html1 + html2 + html3}
)
|
5db006d48cd8c85ad50fe468f96d0421762bb352
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/util/_collections.py
|
2e793e862b79e515021bc31ca57b46ebcc4fffa3
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 20,401
|
py
|
_collections.py
|
# util/_collections.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls
"""Collection classes and helpers."""
from __future__ import annotations
import collections.abc as collections_abc
import operator
import threading
import types
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from typing import ValuesView
import weakref
from ._has_cy import HAS_CYEXTENSION
from .typing import Literal
from .typing import Protocol
if typing.TYPE_CHECKING or not HAS_CYEXTENSION:
from ._py_collections import immutabledict as immutabledict
from ._py_collections import IdentitySet as IdentitySet
from ._py_collections import ReadOnlyContainer as ReadOnlyContainer
from ._py_collections import ImmutableDictBase as ImmutableDictBase
from ._py_collections import OrderedSet as OrderedSet
from ._py_collections import unique_list as unique_list
else:
from sqlalchemy.cyextension.immutabledict import (
ReadOnlyContainer as ReadOnlyContainer,
)
from sqlalchemy.cyextension.immutabledict import (
ImmutableDictBase as ImmutableDictBase,
)
from sqlalchemy.cyextension.immutabledict import (
immutabledict as immutabledict,
)
from sqlalchemy.cyextension.collections import IdentitySet as IdentitySet
from sqlalchemy.cyextension.collections import OrderedSet as OrderedSet
from sqlalchemy.cyextension.collections import ( # noqa
unique_list as unique_list,
)
_T = TypeVar("_T", bound=Any)
_KT = TypeVar("_KT", bound=Any)
_VT = TypeVar("_VT", bound=Any)
_T_co = TypeVar("_T_co", covariant=True)
EMPTY_SET: FrozenSet[Any] = frozenset()
NONE_SET: FrozenSet[Any] = frozenset([None])
def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:
"""merge two lists, maintaining ordering as much as possible.
this is to reconcile vars(cls) with cls.__annotations__.
Example::
>>> a = ['__tablename__', 'id', 'x', 'created_at']
>>> b = ['id', 'name', 'data', 'y', 'created_at']
>>> merge_lists_w_ordering(a, b)
['__tablename__', 'id', 'name', 'data', 'y', 'x', 'created_at']
This is not necessarily the ordering that things had on the class,
in this case the class is::
class User(Base):
__tablename__ = "users"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
data: Mapped[Optional[str]]
x = Column(Integer)
y: Mapped[int]
created_at: Mapped[datetime.datetime] = mapped_column()
But things are *mostly* ordered.
The algorithm could also be done by creating a partial ordering for
all items in both lists and then using topological_sort(), but that
is too much overhead.
Background on how I came up with this is at:
https://gist.github.com/zzzeek/89de958cf0803d148e74861bd682ebae
"""
overlap = set(a).intersection(b)
result = []
current, other = iter(a), iter(b)
while True:
for element in current:
if element in overlap:
overlap.discard(element)
other, current = current, other
break
result.append(element)
else:
result.extend(other)
break
return result
def coerce_to_immutabledict(d: Mapping[_KT, _VT]) -> immutabledict[_KT, _VT]:
if not d:
return EMPTY_DICT
elif isinstance(d, immutabledict):
return d
else:
return immutabledict(d)
EMPTY_DICT: immutabledict[Any, Any] = immutabledict()
class FacadeDict(ImmutableDictBase[_KT, _VT]):
"""A dictionary that is not publicly mutable."""
def __new__(cls, *args: Any) -> FacadeDict[Any, Any]:
new = ImmutableDictBase.__new__(cls)
return new
def copy(self) -> NoReturn:
raise NotImplementedError(
"an immutabledict shouldn't need to be copied. use dict(d) "
"if you need a mutable dictionary."
)
def __reduce__(self) -> Any:
return FacadeDict, (dict(self),)
def _insert_item(self, key: _KT, value: _VT) -> None:
"""insert an item into the dictionary directly."""
dict.__setitem__(self, key, value)
def __repr__(self) -> str:
return "FacadeDict(%s)" % dict.__repr__(self)
_DT = TypeVar("_DT", bound=Any)
_F = TypeVar("_F", bound=Any)
class Properties(Generic[_T]):
"""Provide a __getattr__/__setattr__ interface over a dict."""
__slots__ = ("_data",)
_data: Dict[str, _T]
def __init__(self, data: Dict[str, _T]):
object.__setattr__(self, "_data", data)
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator[_T]:
return iter(list(self._data.values()))
def __dir__(self) -> List[str]:
return dir(super()) + [str(k) for k in self._data.keys()]
def __add__(self, other: Properties[_F]) -> List[Union[_T, _F]]:
return list(self) + list(other) # type: ignore
def __setitem__(self, key: str, obj: _T) -> None:
self._data[key] = obj
def __getitem__(self, key: str) -> _T:
return self._data[key]
def __delitem__(self, key: str) -> None:
del self._data[key]
def __setattr__(self, key: str, obj: _T) -> None:
self._data[key] = obj
def __getstate__(self) -> Dict[str, Any]:
return {"_data": self._data}
def __setstate__(self, state: Dict[str, Any]) -> None:
object.__setattr__(self, "_data", state["_data"])
def __getattr__(self, key: str) -> _T:
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key: str) -> bool:
return key in self._data
def as_readonly(self) -> ReadOnlyProperties[_T]:
"""Return an immutable proxy for this :class:`.Properties`."""
return ReadOnlyProperties(self._data)
def update(self, value: Dict[str, _T]) -> None:
self._data.update(value)
@overload
def get(self, key: str) -> Optional[_T]:
...
@overload
def get(self, key: str, default: Union[_DT, _T]) -> Union[_DT, _T]:
...
def get(
self, key: str, default: Optional[Union[_DT, _T]] = None
) -> Optional[Union[_T, _DT]]:
if key in self:
return self[key]
else:
return default
def keys(self) -> List[str]:
return list(self._data)
def values(self) -> List[_T]:
return list(self._data.values())
def items(self) -> List[Tuple[str, _T]]:
return list(self._data.items())
def has_key(self, key: str) -> bool:
return key in self._data
def clear(self) -> None:
self._data.clear()
class OrderedProperties(Properties[_T]):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
__slots__ = ()
def __init__(self):
Properties.__init__(self, OrderedDict())
class ReadOnlyProperties(ReadOnlyContainer, Properties[_T]):
"""Provide immutable dict/object attribute to an underlying dictionary."""
__slots__ = ()
def _ordered_dictionary_sort(d, key=None):
"""Sort an OrderedDict in-place."""
items = [(k, d[k]) for k in sorted(d, key=key)]
d.clear()
d.update(items)
OrderedDict = dict
sort_dictionary = _ordered_dictionary_sort
class WeakSequence(Sequence[_T]):
def __init__(self, __elements: Sequence[_T] = ()):
# adapted from weakref.WeakKeyDictionary, prevent reference
# cycles in the collection itself
def _remove(item, selfref=weakref.ref(self)):
self = selfref()
if self is not None:
self._storage.remove(item)
self._remove = _remove
self._storage = [
weakref.ref(element, _remove) for element in __elements
]
def append(self, item):
self._storage.append(weakref.ref(item, self._remove))
def __len__(self):
return len(self._storage)
def __iter__(self):
return (
obj for obj in (ref() for ref in self._storage) if obj is not None
)
def __getitem__(self, index):
try:
obj = self._storage[index]
except KeyError:
raise IndexError("Index %s out of range" % index)
else:
return obj()
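# Illustrative sketch (editor's addition, not part of the original module):
# elements are held through weak references, so an entry disappears from the
# sequence once the last strong reference to it is dropped, e.g. (CPython's
# reference counting collects it immediately):
#
#     class Target:
#         pass
#
#     t = Target()
#     seq = WeakSequence([t])
#     len(seq)   # -> 1
#     del t      # the weakref callback prunes the dead entry
#     len(seq)   # -> 0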
class OrderedIdentitySet(IdentitySet):
def __init__(self, iterable: Optional[Iterable[Any]] = None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(Dict[_KT, _VT]):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator: Callable[[_KT], _VT]):
self.creator = creator
def __missing__(self, key: Any) -> Any:
self[key] = val = self.creator(key)
return val
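# Illustrative sketch (editor's addition, not part of the original module):
# unlike collections.defaultdict, the factory receives the missing key itself:
#
#     cache = PopulateDict(lambda key: key * 2)
#     cache["ab"]   # -> "abab", now stored under "ab" for subsequent lookups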
class WeakPopulateDict(Dict[_KT, _VT]):
"""Like PopulateDict, but assumes a self + a method and does not create
a reference cycle.
"""
def __init__(self, creator_method: types.MethodType):
self.creator = creator_method.__func__
weakself = creator_method.__self__
self.weakself = weakref.ref(weakself)
def __missing__(self, key: Any) -> Any:
self[key] = val = self.creator(self.weakself(), key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
class UniqueAppender(Generic[_T]):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
determined by identity (``is``), not equality (``==``).
"""
__slots__ = "data", "_data_appender", "_unique"
data: Union[Iterable[_T], Set[_T], List[_T]]
_data_appender: Callable[[_T], None]
_unique: Dict[int, Literal[True]]
def __init__(
self,
data: Union[Iterable[_T], Set[_T], List[_T]],
via: Optional[str] = None,
):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via) # type: ignore[assignment] # noqa: E501
elif hasattr(data, "append"):
self._data_appender = cast("List[_T]", data).append # type: ignore[assignment] # noqa: E501
elif hasattr(data, "add"):
self._data_appender = cast("Set[_T]", data).add # type: ignore[assignment] # noqa: E501
def append(self, item: _T) -> None:
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item) # type: ignore[call-arg]
self._unique[id_] = True
def __iter__(self) -> Iterator[_T]:
return iter(self.data)
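# Illustrative sketch (editor's addition, not part of the original module):
# membership is tracked by id(), so re-appending the same object is a no-op
# while an equal-but-distinct object is still kept:
#
#     target: list = []
#     appender = UniqueAppender(target)
#     item = ["x"]
#     appender.append(item)
#     appender.append(item)    # ignored -- same object identity
#     appender.append(["x"])   # kept -- equal to item but a distinct object
#     # target == [["x"], ["x"]]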
def coerce_generator_arg(arg: Any) -> List[Any]:
if len(arg) == 1 and isinstance(arg[0], types.GeneratorType):
return list(arg[0])
else:
return cast("List[Any]", arg)
def to_list(x: Any, default: Optional[List[Any]] = None) -> List[Any]:
if x is None:
return default # type: ignore
if not isinstance(x, collections_abc.Iterable) or isinstance(
x, (str, bytes)
):
return [x]
elif isinstance(x, list):
return x
else:
return list(x)
def has_intersection(set_, iterable):
r"""return True if any items of set\_ are present in iterable.
Goes through special effort to ensure __hash__ is not called
on items in iterable that don't support it.
"""
# TODO: optimize, write in C, etc.
return bool(set_.intersection([i for i in iterable if i.__hash__]))
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x: Any) -> Set[Any]:
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x: Iterable[_T]) -> Iterator[_T]:
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
elem: _T
for elem in x:
if not isinstance(elem, str) and hasattr(elem, "__iter__"):
yield from flatten_iterator(elem)
else:
yield elem
class LRUCache(typing.MutableMapping[_KT, _VT]):
"""Dictionary with 'squishy' removal of least
recently used items.
Note that either get() or [] should be used here, but
    generally it's not safe to do an "in" check first, as the dictionary
can change subsequent to that call.
"""
__slots__ = (
"capacity",
"threshold",
"size_alert",
"_data",
"_counter",
"_mutex",
)
capacity: int
threshold: float
size_alert: Optional[Callable[[LRUCache[_KT, _VT]], None]]
def __init__(
self,
capacity: int = 100,
threshold: float = 0.5,
size_alert: Optional[Callable[..., None]] = None,
):
self.capacity = capacity
self.threshold = threshold
self.size_alert = size_alert
self._counter = 0
self._mutex = threading.Lock()
self._data: Dict[_KT, Tuple[_KT, _VT, List[int]]] = {}
def _inc_counter(self):
self._counter += 1
return self._counter
@overload
def get(self, key: _KT) -> Optional[_VT]:
...
@overload
def get(self, key: _KT, default: Union[_VT, _T]) -> Union[_VT, _T]:
...
def get(
self, key: _KT, default: Optional[Union[_VT, _T]] = None
) -> Optional[Union[_VT, _T]]:
item = self._data.get(key, default)
if item is not default and item is not None:
item[2][0] = self._inc_counter()
return item[1]
else:
return default
def __getitem__(self, key: _KT) -> _VT:
item = self._data[key]
item[2][0] = self._inc_counter()
return item[1]
def __iter__(self) -> Iterator[_KT]:
return iter(self._data)
def __len__(self) -> int:
return len(self._data)
def values(self) -> ValuesView[_VT]:
return typing.ValuesView({k: i[1] for k, i in self._data.items()})
def __setitem__(self, key: _KT, value: _VT) -> None:
self._data[key] = (key, value, [self._inc_counter()])
self._manage_size()
def __delitem__(self, __v: _KT) -> None:
del self._data[__v]
@property
def size_threshold(self) -> float:
return self.capacity + self.capacity * self.threshold
def _manage_size(self) -> None:
if not self._mutex.acquire(False):
return
try:
size_alert = bool(self.size_alert)
while len(self) > self.capacity + self.capacity * self.threshold:
if size_alert:
size_alert = False
self.size_alert(self) # type: ignore
by_counter = sorted(
self._data.values(),
key=operator.itemgetter(2),
reverse=True,
)
for item in by_counter[self.capacity :]:
try:
del self._data[item[0]]
except KeyError:
# deleted elsewhere; skip
continue
finally:
self._mutex.release()
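# Editor's illustrative sketch (not part of the original module): nothing is
# evicted until the size exceeds capacity * (1 + threshold); the pruning pass
# then keeps only the `capacity` most recently used entries.
def _lru_cache_example():
    cache = LRUCache(capacity=2, threshold=0.5)
    for key in ("a", "b", "c"):
        cache[key] = key.upper()  # len == 3, still at the threshold, no eviction
    cache["d"] = "D"  # len == 4 > 3 triggers a pruning pass
    assert len(cache) == 2
    assert cache.get("d") == "D"  # most recently used entries survive
    assert cache.get("a") is None  # least recently used entries were evicted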
class _CreateFuncType(Protocol[_T_co]):
def __call__(self) -> _T_co:
...
class _ScopeFuncType(Protocol):
def __call__(self) -> Any:
...
class ScopedRegistry(Generic[_T]):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
__slots__ = "createfunc", "scopefunc", "registry"
createfunc: _CreateFuncType[_T]
scopefunc: _ScopeFuncType
registry: Any
def __init__(
self, createfunc: Callable[[], _T], scopefunc: Callable[[], Any]
):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self) -> _T:
key = self.scopefunc()
try:
return self.registry[key] # type: ignore[no-any-return]
except KeyError:
return self.registry.setdefault(key, self.createfunc()) # type: ignore[no-any-return] # noqa: E501
def has(self) -> bool:
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj: _T) -> None:
"""Set the value for the current scope."""
self.registry[self.scopefunc()] = obj
def clear(self) -> None:
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
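# Editor's illustrative sketch (not part of the original module): with the
# current thread id as the scope key, repeated calls in the same thread hand
# back the same lazily created object.
def _scoped_registry_example():
    registry = ScopedRegistry(createfunc=list, scopefunc=threading.get_ident)
    assert not registry.has()
    first = registry()  # created on first access for this scope
    assert registry() is first  # reused within the same scope
    registry.clear()
    assert not registry.has()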
class ThreadLocalRegistry(ScopedRegistry[_T]):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc: Callable[[], _T]):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self) -> _T:
try:
return self.registry.value # type: ignore[no-any-return]
except AttributeError:
val = self.registry.value = self.createfunc()
return val # type: ignore[no-any-return]
def has(self) -> bool:
return hasattr(self.registry, "value")
def set(self, obj: _T) -> None:
self.registry.value = obj
def clear(self) -> None:
try:
del self.registry.value
except AttributeError:
pass
def has_dupes(sequence, target):
"""Given a sequence and search object, return True if there's more
than one, False if zero or one of them.
"""
# compare to .index version below, this version introduces less function
# overhead and is usually the same speed. At 15000 items (way bigger than
# a relationship-bound collection in memory usually is) it begins to
# fall behind the other version only by microseconds.
c = 0
for item in sequence:
if item is target:
c += 1
if c > 1:
return True
return False
# .index version. the two __contains__ calls as well
# as .index() and isinstance() slow this down.
# def has_dupes(sequence, target):
# if target not in sequence:
# return False
# elif not isinstance(sequence, collections_abc.Sequence):
# return False
#
# idx = sequence.index(target)
# return target in sequence[idx + 1:]
|
9d1307b654696f3eaeb8357b61ebe3bbf1c6d271
|
85ba0a1df162642fe5ca7e14e06f60e5ae3f6d00
|
/recursion/maze.py
|
382d4667c431a3e9820028a0693c70e8183f8bae
|
[
"MIT"
] |
permissive
|
ivanmmarkovic/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python
|
ad478564167b7f3f8eed607cb23bd0190f4d81f8
|
1a6e8fd5b93c9fe87231bef57bd92b81a40ae38d
|
refs/heads/master
| 2023-02-08T10:49:18.390691
| 2023-01-23T22:22:58
| 2023-01-23T22:22:58
| 157,221,493
| 138
| 42
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
maze.py
|
from stack import Stack
size: int = 5
matrix = [[0 for x in range(size)] for y in range(size)]
'''
0 - not visited
1 - visited
2 - obstacle
'''
# place maze obstacles
matrix[0][2] = 2
matrix[0][3] = 2
matrix[1][0] = 2
matrix[2][2] = 2
matrix[2][3] = 2
matrix[2][4] = 2
matrix[3][1] = 2
matrix[4][3] = 2
# starting position
matrix[0][0] = 1
stack = Stack()
stack.push([0, 0])
def path_find(maze: list, stack: Stack):
if stack.is_empty():
print("Path not found")
else:
coords: list = stack.peek()
x: int = coords[0]
y: int = coords[1]
if x == len(maze) - 1 and y == len(maze[x]) - 1:
print("Path found")
elif y < len(maze[x]) - 1 and maze[x][y + 1] == 0:
maze[x][y + 1] = 1
stack.push([x, y + 1])
path_find(maze, stack)
elif y > 0 and maze[x][y - 1] == 0:
maze[x][y - 1] = 1
stack.push([x, y - 1])
path_find(maze, stack)
elif x < len(maze) - 1 and maze[x + 1][y] == 0:
maze[x + 1][y] = 1
stack.push([x + 1, y])
path_find(maze, stack)
elif x > 0 and maze[x - 1][y] == 0:
maze[x - 1][y] = 1
stack.push([x - 1, y])
path_find(maze, stack)
else:
stack.pop()
path_find(maze, stack)
path_find(matrix, stack)
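# --- Editor's illustrative addition (not part of the original exercise) ---
# When "Path found" is printed, the stack still holds the discovered route with
# the start cell at the bottom and the exit at the top; on failure it is empty.
# Assuming the Stack class used above exposes is_empty() and pop(), the route
# can be read back like this:
found_path = []
while not stack.is_empty():
    found_path.append(stack.pop())
found_path.reverse()
print(found_path)  # e.g. [[0, 0], [0, 1], ..., [4, 4]] when a path exists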
|
009927e9c94bff15be7df860b588cf93c3f5713f
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/benchmarks/benchmarks/bench_trim_zeros.py
|
4e25a8b021b7717cf8acabf7dd6386d043e85d55
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 607
|
py
|
bench_trim_zeros.py
|
from .common import Benchmark
import numpy as np
_FLOAT = np.dtype('float64')
_COMPLEX = np.dtype('complex128')
_INT = np.dtype('int64')
_BOOL = np.dtype('bool')
class TrimZeros(Benchmark):
param_names = ["dtype", "size"]
params = [
[_INT, _FLOAT, _COMPLEX, _BOOL],
[3000, 30_000, 300_000]
]
def setup(self, dtype, size):
n = size // 3
self.array = np.hstack([
np.zeros(n),
np.random.uniform(size=n),
np.zeros(n),
]).astype(dtype)
def time_trim_zeros(self, dtype, size):
np.trim_zeros(self.array)
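# Editor's illustrative sketch (not part of the benchmark): np.trim_zeros strips
# the leading and trailing zeros of a 1-D array, which is exactly the
# zeros / random / zeros shape built in setup() above.
def _trim_zeros_example():
    out = np.trim_zeros(np.array([0.0, 0.0, 3.0, 7.0, 0.0]))
    assert out.tolist() == [3.0, 7.0]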
|
cf010433315f58f9bfb0a6efefbf1e248206c62d
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/compiler/test/fixtures/includes/gen-py3/service/builders.pyi
|
a75450570eccccd2e6c92bd097adc0825cdd5320
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 511
|
pyi
|
builders.pyi
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import typing as _typing
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.builder
import includes.types as _includes_types
import includes.builders as _includes_builders
import module.types as _module_types
import module.builders as _module_builders
import transitive.types as _transitive_types
import transitive.builders as _transitive_builders
import service.types as _service_types
|
420ed70382bc6eb7adb5fa0f513ba1bd7ad6dee3
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/tests/functional/cfngin/test_rollback_dependant/test_runner.py
|
f66a155c56fe772009e864147e0eb82a914cf0ad
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
test_runner.py
|
"""Test failed stack with dependency."""
# pylint: disable=redefined-outer-name
from __future__ import annotations
import shutil
from pathlib import Path
from typing import TYPE_CHECKING, Generator
import pytest
from runway._cli import cli
if TYPE_CHECKING:
from click.testing import CliRunner, Result
CURRENT_DIR = Path(__file__).parent
@pytest.fixture(scope="module")
def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]:
"""Execute `runway deploy` with `runway destroy` as a cleanup step."""
yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"})
assert cli_runner.invoke(cli, ["destroy"], env={"CI": "1"}).exit_code == 0
shutil.rmtree(CURRENT_DIR / ".runway", ignore_errors=True)
@pytest.mark.order("first")
def test_deploy_exit_code(deploy_result: Result) -> None:
"""Test deploy exit code."""
assert deploy_result.exit_code != 0
@pytest.mark.order(after="test_deploy_exit_code")
def test_deploy_log_messages(deploy_result: Result, namespace: str) -> None:
"""Test deploy log messages."""
expected_lines = [
"cfngin.yml:deploy (in progress)",
"dependent-rollback-parent:submitted (creating new stack)",
f"{namespace}-dependent-rollback-parent:roll back reason: "
"The following resource(s) failed to create: [BrokenWaitCondition]. "
"Rollback requested by user.",
"dependent-rollback-child:failed (dependency has failed)",
"The following steps failed: dependent-rollback-parent, dependent-rollback-child",
]
for line in expected_lines:
assert f"[runway] {line}" in deploy_result.stdout, (
"stdout is missing expected line\n\nEXPECTED:\n"
f"{line}\n\nSTDOUT:\n{deploy_result.stdout}"
)
|
1d8488b7a965225df0d2c68daa922cb4a208262e
|
96bf0a09449c1fb415b89e937d57556a67204252
|
/pysheaf/__init__.py
|
112b985242b32861551859dc28f2ce50f37332a9
|
[
"Apache-2.0"
] |
permissive
|
kb1dds/pysheaf
|
382cccee281845d7072291ccc1379b78a7cf1574
|
d636794c5a9ade2ed7af74514333930489f600c4
|
refs/heads/master
| 2022-05-01T00:50:29.592971
| 2022-04-27T20:05:25
| 2022-04-27T20:05:25
| 23,200,854
| 117
| 27
| null | 2019-04-23T12:04:57
| 2014-08-21T19:53:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 23
|
py
|
__init__.py
|
from .pysheaf import *
|
f9b209c37e68db52f915d70903b83ce3ae56f4d0
|
7030c780db36c7d8efedb1152cf945a3cc248fdb
|
/python/cuml/tests/explainer/test_explainer_kernel_shap.py
|
74c985989f6c781fbefb14eebb835f3ddc086b08
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cuml
|
546af8151fd2ee0f737cc4e62386d4b0ede74f3d
|
7d86042b8de06bc8acce632230fe5821bd36c17d
|
refs/heads/branch-23.10
| 2023-08-30T19:17:41.816373
| 2023-08-28T13:23:15
| 2023-08-28T13:23:15
| 152,616,802
| 3,615
| 569
|
Apache-2.0
| 2023-09-14T00:21:52
| 2018-10-11T15:45:35
|
C++
|
UTF-8
|
Python
| false
| false
| 20,404
|
py
|
test_explainer_kernel_shap.py
|
#
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from cuml.testing.utils import (
create_synthetic_dataset,
ClassEnumerator,
get_shap_values,
)
from cuml.datasets import make_regression
from cuml.internals.import_utils import has_shap
from cuml.internals.import_utils import has_scipy
from cuml import KernelExplainer
from cuml import Lasso
import sklearn.neighbors
import pytest
import math
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
def assert_and_log(
cu_shap_values, golden_result_values, fx, expected, tolerance=1e-02
):
close_values = np.allclose(
cu_shap_values, golden_result_values, rtol=tolerance, atol=tolerance
)
expected_sum = np.allclose(
1.00, np.sum(cp.asnumpy(cu_shap_values)) / (fx - expected), rtol=1e-01
)
if not close_values:
print("cu_shap_values: ")
print(cu_shap_values)
print("golden_result_values")
print(golden_result_values)
if not expected_sum:
print(np.sum(cp.asnumpy(cu_shap_values)))
assert expected_sum
assert close_values
###############################################################################
# End to end tests #
###############################################################################
@pytest.mark.parametrize(
"model", [cuml.LinearRegression, cuml.KNeighborsRegressor, cuml.SVR]
)
def test_exact_regression_datasets(exact_shap_regression_dataset, model):
X_train, X_test, y_train, y_test = exact_shap_regression_dataset
models = []
models.append(model().fit(X_train, y_train))
models.append(cuml_skl_class_dict[model]().fit(X_train, y_train))
for mod in models:
explainer, shap_values = get_shap_values(
model=mod.predict,
background_dataset=X_train,
explained_dataset=X_test,
explainer=KernelExplainer,
)
for i in range(3):
print(i)
assert_and_log(
shap_values[i],
golden_regression_results[model][i],
mod.predict(X_test[i].reshape(1, X_test.shape[1])),
explainer.expected_value,
)
def test_exact_classification_datasets(exact_shap_classification_dataset):
X_train, X_test, y_train, y_test = exact_shap_classification_dataset
models = []
models.append(cuml.SVC(probability=True).fit(X_train, y_train))
models.append(sklearn.svm.SVC(probability=True).fit(X_train, y_train))
for mod in models:
explainer, shap_values = get_shap_values(
model=mod.predict_proba,
background_dataset=X_train,
explained_dataset=X_test,
explainer=KernelExplainer,
)
# Some values are very small, which mean our tolerance here needs to be
# a little looser to avoid false positives from comparisons like
# 0.00348627 - 0.00247397. The loose tolerance still tests that the
# distribution of the values matches.
for idx, svs in enumerate(shap_values):
assert_and_log(
svs[0],
golden_classification_result[idx],
float(mod.predict_proba(X_test)[0][idx]),
explainer.expected_value[idx],
tolerance=1e-01,
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("n_features", [10, 30])
@pytest.mark.parametrize("n_background", [10, 30])
@pytest.mark.parametrize("model", [cuml.TruncatedSVD, cuml.PCA])
def test_kernel_shap_standalone(dtype, n_features, n_background, model):
X_train, X_test, y_train, y_test = create_synthetic_dataset(
n_samples=n_background + 3,
n_features=n_features,
test_size=3,
noise=0.1,
dtype=dtype,
)
mod = model(n_components=3).fit(X_train, y_train)
explainer, shap_values = get_shap_values(
model=mod.transform,
background_dataset=X_train,
explained_dataset=X_test,
explainer=KernelExplainer,
)
exp_v = explainer.expected_value
    # We get 3 lists of shap values, one per component, since transform()
    # returns arrays of shape (nrows, ncomponents). For each test row and each
    # component we check that the sum of the shap values matches the gap
    # between that component's transformed value and the expected value for
    # that component.
for sv_idx in range(3):
# pca and tsvd transform give results back nested
fx = mod.transform(X_test[sv_idx].reshape(1, n_features))[0]
for comp_idx in range(3):
assert (
np.sum(shap_values[comp_idx][sv_idx])
- abs(fx[comp_idx] - exp_v[comp_idx])
) <= 1e-5
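def _additivity_property_example():
    # Editor's illustrative sketch (not part of the original test suite): the
    # assertions above and below rely on the SHAP local-accuracy property, i.e.
    # the per-feature attributions of one row sum (approximately) to the gap
    # between the model output for that row and the explainer's expected value.
    expected_value = 2.0
    fx = 5.0
    attributions = np.array([1.0, 0.5, 1.5])  # hypothetical per-feature values
    assert abs(np.sum(attributions) - (fx - expected_value)) <= 1e-5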
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("n_features", [11, 15])
@pytest.mark.parametrize("n_background", [30])
@pytest.mark.parametrize("model", [cuml.SVR])
def test_kernel_gpu_cpu_shap(dtype, n_features, n_background, model):
X_train, X_test, y_train, y_test = create_synthetic_dataset(
n_samples=n_background + 3,
n_features=n_features,
test_size=3,
noise=0.1,
dtype=dtype,
)
mod = model().fit(X_train, y_train)
explainer, shap_values = get_shap_values(
model=mod.predict,
background_dataset=X_train,
explained_dataset=X_test,
explainer=KernelExplainer,
)
exp_v = explainer.expected_value
fx = mod.predict(X_test)
for test_idx in range(3):
assert (
np.sum(shap_values[test_idx]) - abs(fx[test_idx] - exp_v)
) <= 1e-5
if has_shap():
import shap
explainer = shap.KernelExplainer(mod.predict, cp.asnumpy(X_train))
cpu_shap_values = explainer.shap_values(cp.asnumpy(X_test))
assert np.allclose(
shap_values, cpu_shap_values, rtol=1e-01, atol=1e-01
)
def test_kernel_housing_dataset(housing_dataset):
X, y, _ = housing_dataset
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
# making all float32 to use gpu predict on random forest
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
y_train = y_train.astype(np.float32)
y_test = y_test.astype(np.float32)
cumodel = cuml.RandomForestRegressor().fit(X_train, y_train)
explainer = KernelExplainer(
model=cumodel.predict, data=X_train[:100], output_type="numpy"
)
cu_shap_values = explainer.shap_values(X_test[:2])
assert np.allclose(
cu_shap_values, housing_regression_result, rtol=1e-01, atol=1e-01
)
###############################################################################
# Single function unit tests #
###############################################################################
def test_binom_coef():
for i in range(1, 101):
val = cuml.explainer.kernel_shap._binomCoef(100, i)
if has_scipy():
from scipy.special import binom
assert math.isclose(val, binom(100, i), rel_tol=1e-15)
def test_shapley_kernel():
for i in range(11):
val = cuml.explainer.kernel_shap._shapley_kernel(10, i)
assert val == shapley_kernel_results[i]
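def _shapley_kernel_formula_example():
    # Editor's illustrative sketch (not part of the original test suite): for
    # 0 < s < M the Shapley kernel weight is (M - 1) / (C(M, s) * s * (M - s)),
    # while the end points s == 0 and s == M get a large constant weight
    # (10000 in shapley_kernel_results below) so they are effectively enforced.
    M = 10
    for s in range(1, M):
        expected = (M - 1) / (math.comb(M, s) * s * (M - s))
        assert math.isclose(expected, shapley_kernel_results[s], rel_tol=1e-12)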
def test_full_powerset():
ps, w = cuml.explainer.kernel_shap._powerset(
5, 2, 2**5 - 2, full_powerset=True
)
for i in range(len(ps)):
assert np.all(ps[i] == full_powerset_result[i])
assert math.isclose(w[i], full_powerset_weight_result[i])
def test_partial_powerset():
ps, w = cuml.explainer.kernel_shap._powerset(6, 3, 42)
for i in range(len(ps)):
assert np.all(ps[i] == partial_powerset_result[i])
assert math.isclose(w[i], partial_powerset_weight_result[i])
@pytest.mark.parametrize("full_powerset", [True, False])
def test_get_number_of_exact_random_samples(full_powerset):
if full_powerset:
(
nsamples_exact,
nsamples_random,
ind,
) = cuml.explainer.kernel_shap._get_number_of_exact_random_samples(
10, 2**10 + 1
)
assert nsamples_exact == 1022
assert nsamples_random == 0
assert ind == 5
else:
(
nsamples_exact,
nsamples_random,
ind,
) = cuml.explainer.kernel_shap._get_number_of_exact_random_samples(
10, 100
)
assert nsamples_exact == 20
assert nsamples_random == 80
assert ind == 2
def test_generate_nsamples_weights():
samples, w = cuml.explainer.kernel_shap._generate_nsamples_weights(
ncols=20,
nsamples=30,
nsamples_exact=10,
nsamples_random=20,
randind=5,
dtype=np.float32,
)
    # check that all generated samples are either 5 or 6, and that the
    # weights are generated correctly in pairs
for i, s in enumerate(samples):
assert s in [5, 6]
assert w[i * 2] == cuml.explainer.kernel_shap._shapley_kernel(
20, int(s)
)
assert w[i * 2 + 1] == cuml.explainer.kernel_shap._shapley_kernel(
20, int(s)
)
@pytest.mark.parametrize(
"l1_type", ["auto", "aic", "bic", "num_features(3)", 0.2]
)
def test_l1_regularization(exact_shap_regression_dataset, l1_type):
    # Currently this is a smoke test of the code path rather than of the
    # mathematical results: testing those would mean re-testing the underlying
    # algorithms, which is out of scope for this unit test.
X, w = cuml.explainer.kernel_shap._powerset(
5, 2, 2**5 - 2, full_powerset=True
)
y = cp.random.rand(X.shape[0])
nz = cuml.explainer.kernel_shap._l1_regularization(
X=cp.asarray(X).astype(np.float32),
y=cp.asarray(y).astype(np.float32),
weights=cp.asarray(w),
expected_value=0.0,
fx=0.0,
link_fn=cuml.explainer.common.identity,
l1_reg=l1_type,
)
assert isinstance(nz, cp.ndarray)
@pytest.mark.skip(reason="Currently failing for unknown reasons.")
def test_typeerror_input():
X, y = make_regression(n_samples=100, n_features=10, random_state=10)
clf = Lasso()
clf.fit(X, y)
exp = KernelExplainer(model=clf.predict, data=X, nsamples=10)
try:
_ = exp.shap_values(X)
assert True
except TypeError:
assert False
###############################################################################
# Precomputed results #
# and testing variables #
###############################################################################
# "golden" results obtained by running brute force Kernel SHAP notebook from
# https://github.com/slundberg/shap/blob/master/notebooks/kernel_explainer/Simple%20Kernel%20SHAP.ipynb
# and confirmed with SHAP package.
golden_regression_results = {
cuml.LinearRegression: [
[
-1.3628216e00,
-1.0234555e02,
1.3433075e-01,
-6.1763966e01,
2.6035309e-04,
-3.4455872e00,
-1.0159061e02,
3.4058199e00,
4.1598396e01,
7.2152481e01,
-2.1964417e00,
],
[
-8.6558792e01,
8.9456577e00,
-3.6405910e01,
1.0574381e01,
-4.1580200e-04,
-5.8939896e01,
4.8407948e01,
1.4475842e00,
-2.0742226e01,
6.6378265e01,
-3.5134201e01,
],
[
-1.3722158e01,
-2.9430325e01,
-8.0079269e01,
1.2096907e02,
1.0681152e-03,
-5.4266449e01,
-3.1012087e01,
-7.9640961e-01,
7.7072838e01,
1.5370981e01,
-2.4032040e01,
],
],
cuml.KNeighborsRegressor: [
[
4.3210926,
-47.497078,
-4.523407,
-35.49657,
-5.5174675,
-14.158726,
-51.303787,
-2.6457424,
12.230529,
52.345207,
6.3014755,
],
[
-52.036957,
2.4158602,
-20.302296,
15.428952,
5.9823637,
-20.046719,
22.46046,
-4.762917,
-6.20145,
37.457417,
5.3511925,
],
[
-8.803419,
-7.4095736,
-48.113777,
57.21296,
1.0490589,
-37.94751,
-20.748789,
-0.22258139,
28.204493,
4.5492225,
0.5797138,
],
],
cuml.SVR: [
[
3.53810340e-02,
-8.11021507e-01,
3.34369540e-02,
-8.68727207e-01,
1.06804073e-03,
-1.14741415e-01,
-1.35545099e00,
3.87545109e-01,
4.43311602e-01,
1.08623052e00,
2.65314579e-02,
],
[
-1.39247358e00,
5.91157824e-02,
-4.33764964e-01,
1.04503572e-01,
-4.41753864e-03,
-1.09017754e00,
5.90143979e-01,
1.08445108e-01,
-2.26831138e-01,
9.69056726e-01,
-1.18437767e-01,
],
[
-1.28573015e-01,
-2.33658075e-01,
-1.02735841e00,
1.47447693e00,
-1.99043751e-03,
-1.11328888e00,
-4.66209412e-01,
-1.02243885e-01,
8.18460345e-01,
2.20144764e-01,
-9.62769389e-02,
],
],
}
# For testing predict proba, we get one array of shap values per class
golden_classification_result = [
[
0.00152159,
0.00247397,
0.00250474,
0.00155965,
0.0113184,
-0.01153999,
0.19297145,
0.17027254,
0.00850102,
-0.01293354,
-0.00088981,
],
[
-0.00152159,
-0.00247397,
-0.00250474,
-0.00155965,
-0.0113184,
0.01153999,
-0.19297145,
-0.17027254,
-0.00850102,
0.01293354,
0.00088981,
],
]
housing_regression_result = np.array(
[
[
-0.73860609,
0.00557072,
-0.05829297,
-0.01582018,
-0.01010366,
-0.23167623,
-0.470639,
-0.07584473,
],
[
-0.6410764,
0.01369913,
-0.09492759,
0.02654463,
-0.00911134,
-0.05953105,
-0.51266433,
-0.0853608,
],
],
dtype=np.float32,
)
cuml_skl_class_dict = {
cuml.LinearRegression: sklearn.linear_model.LinearRegression,
cuml.KNeighborsRegressor: sklearn.neighbors.KNeighborsRegressor,
cuml.SVR: sklearn.svm.SVR,
}
# results for individual function unit tests
shapley_kernel_results = [
10000,
0.1,
0.0125,
0.0035714285714285713,
0.0017857142857142857,
0.0014285714285714286,
0.0017857142857142857,
0.0035714285714285713,
0.0125,
0.1,
10000,
]
full_powerset_result = [
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 1.0, 1.0, 1.0],
]
full_powerset_weight_result = np.array(
[
0.2,
0.2,
0.2,
0.2,
0.2,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.06666667,
0.2,
0.2,
0.2,
0.2,
0.2,
],
dtype=np.float32,
)
partial_powerset_result = [
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 1.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
]
partial_powerset_weight_result = np.array(
[
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.16666667,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
0.041666668,
],
dtype=np.float32,
)
|
8c849048db9d5e67b73c58a482c1fd90365a4599
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/tools/fw_upgrade/entity_upgrader.py
|
d3877b310c61f9506352fa2ff4c1e6b2af2cc179
|
[] |
no_license
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 20,103
|
py
|
entity_upgrader.py
|
#!/usr/bin/python3
import hashlib
import logging
import mmap
import os
import re
import subprocess
import time
from exceptions import (
FwUpgraderFailedUpgrade,
FwUpgraderMissingFileHash,
FwUpgraderMissingJsonKey,
FwUpgraderUnexpectedFileHash,
FwUpgraderUnexpectedJsonKey,
)
from typing import Any, Dict, Iterable, List, Optional
from constants import (
UFW_CMD,
UFW_CONDITION,
UFW_CONTINUE_ON_ERROR,
UFW_ENTITY_INSTANCE,
UFW_GET_VERSION,
UFW_HASH,
UFW_HASH_VALUE,
UFW_NAME,
UFW_POST_ACTION,
UFW_PRIORITY,
UFW_VERSION,
HashType,
ResetMode,
UpgradeState,
)
class FwEntityUpgrader(object):
_REGEX_VERSION_PATTERN = r"^[v]?([0-9]*)\.([0-9]*)$"
_REGEX_STRING_PATTERN = r"^[0-9a-zA-Z\-]+$"
def __init__(
self,
fw_entity: str,
fw_info, # type: Dict[str, Any]
binarypath: str,
stop_on_error: bool = False,
dryrun: bool = False,
forced_upgrade: bool = False,
):
"""
Parameters:
fw_entity: fw entity to upgrade
fw_info: info about fw entity
stop_on_error: param to track if upgrade should stop on any failure
dryrun: echo upgrade commands, but do not run them
            forced_upgrade: even if the versions are the same, whether to force an upgrade
"""
self._fw_entity = fw_entity # type: str
# Using Any for expecting a str, list of str
self._fw_info = fw_info # type: Dict[str, Any]
self._stop_on_error = stop_on_error # type: bool
self._dryrun = dryrun # type: bool
self._forced_upgrade = forced_upgrade # type: bool
self._cwd = binarypath # type: str
# =========================================================================
    # Helper APIs for running through an upgrade
# =========================================================================
# Step 1
    def _get_entities_list(
        self, entities: Optional[Iterable[str]] = None
    ) -> Iterable[Optional[str]]:
"""
        Given an entity list string such as
["1", "2", "3", "4", "5", "6", "7", "8"]
or
["left"]
verify entities are in the expected format and return list
"""
if not entities:
# Default to 1 value so we iterate at least once
return [None]
for item in entities:
matched = re.findall(self._REGEX_STRING_PATTERN, item)
if not matched:
msg = "Malformed string in list : {} ".format(entities)
raise FwUpgraderUnexpectedJsonKey(msg)
return entities
# Step 2.1
def _is_file_md5sum_match(self, filename: str, expected_md5sum: str) -> bool:
"""
Given a filename, verify if its md5sum matches the expected value
"""
h_md5 = hashlib.md5()
with open(filename, "r+b") as f, mmap.mmap(f.fileno(), 0) as mm:
            # mypy doesn't like mmap here, but it works
h_md5.update(mm) # type: ignore
file_md5 = h_md5.hexdigest()
if expected_md5sum != file_md5:
logging.info("FILE MD5 : " + file_md5)
logging.info("JSON MD5 : " + expected_md5sum)
return expected_md5sum == file_md5
# Step 2.2
def _is_file_sha1sum_match(self, filename: str, expected_sha1sum: str) -> bool:
"""
Given a filename, verify if its sha1sum matches the expected value
"""
h_sha1 = hashlib.sha1()
with open(filename, "r+b") as f, mmap.mmap(f.fileno(), 0) as mm:
            # mypy doesn't like mmap here, but it works
h_sha1.update(mm) # type: ignore
file_sha1 = h_sha1.hexdigest()
if expected_sha1sum != file_sha1:
logging.info("FILE SHA1 : " + file_sha1)
logging.info("JSON SHA1 : " + expected_sha1sum)
return expected_sha1sum == file_sha1
# Step 2
def _verify_item_binary(self, filename: str) -> None:
"""
Verify if fw_entity's file has valid SHASUM or MD5SUM
"""
        if UFW_HASH not in self._fw_info or UFW_HASH_VALUE not in self._fw_info:
msg = "Hash missing for fw entity {}".format(self._fw_entity)
raise FwUpgraderMissingFileHash(msg)
if self._fw_info[UFW_HASH] == HashType.SHA1SUM.value:
if not self._is_file_sha1sum_match(filename, self._fw_info[UFW_HASH_VALUE]):
msg = "sha1sum not matching for fw entity {}".format(self._fw_entity)
raise FwUpgraderUnexpectedFileHash(msg)
elif self._fw_info[UFW_HASH] == HashType.MD5SUM.value:
if not self._is_file_md5sum_match(filename, self._fw_info[UFW_HASH_VALUE]):
msg = "md5sum not matching for fw entity {}".format(self._fw_entity)
raise FwUpgraderUnexpectedFileHash(msg)
else:
msg = "Unknown hash for {} {}".format(
self._fw_entity, self._fw_info[UFW_HASH]
)
raise FwUpgraderUnexpectedFileHash(msg)
# Step 3.1
def _compare_current_and_package_versions(self, current_ver, package_ver) -> bool:
"""
# Version string can be random. So we try the following strategy
# Case 1. If it is x.xx format or vx.xx, parse it as major / minor,
# and compare accordingly
# Case 2. Otherwise, we just compare it as string
"""
need_to_upgrade = False
re_pattern = self._REGEX_VERSION_PATTERN
re_result = re.search(re_pattern, current_ver)
if re_result:
# Case 1
logging.debug("Current Version : {}".format(current_ver))
logging.debug("JSON Version : {}".format(package_ver))
cur_major = int(re_result.group(1))
cur_minor = int(re_result.group(2))
pkg_re = re.search(re_pattern, package_ver)
if pkg_re:
pkg_major = int(pkg_re.group(1))
pkg_minor = int(pkg_re.group(2))
logging.debug(
"FW Entity : {}(version number comparison)".format(
self._fw_entity
)
)
logging.debug(
"JSON Version (after parsing) : {}.{}".format(
str(pkg_major), str(pkg_minor)
)
)
logging.debug(
"Current Version (after parsing) : {}.{}".format(
str(cur_major), str(cur_minor)
)
)
need_to_upgrade = (pkg_major, pkg_minor) != (cur_major, cur_minor)
else:
logging.warning("Couldnt parse version, defaulting to upgrade")
need_to_upgrade = True
else:
# Case 2
logging.debug("FW Entity : {}(string comparison)".format(self._fw_entity))
logging.debug("JSON Version (after parsing) : {}".format(str(package_ver)))
logging.debug(
"Current Version (after parsing) : {}".format(str(current_ver))
)
need_to_upgrade = package_ver != current_ver
return need_to_upgrade
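    # Editor's illustrative sketch (not part of the original module), showing
    # the two comparison strategies above on hypothetical inputs:
    #   _compare_current_and_package_versions("v1.02", "1.2")   -> False (1.2 == 1.2)
    #   _compare_current_and_package_versions("1.2", "1.3")     -> True  (minor differs)
    #   _compare_current_and_package_versions("abc-1", "abc-2") -> True  (string compare)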
# Step 3
def _is_entity_upgrade_needed(self, instance_specifier=None) -> bool:
"""
Given one component check is firmware upgrade needed or not based on the
version
"""
need_to_upgrade = False
entities = self._get_entity_list_string_in_json()
instance_list = self._get_entities_list(entities)
for instance in instance_list:
# If instance specifier is set, deal with that instance only
if instance_specifier and instance_specifier != instance:
pass
else:
if self._is_version_set_in_json():
cmd_to_execute = self._fw_info[UFW_GET_VERSION]
if entities:
cmd_to_execute = cmd_to_execute.format(entity=instance)
current_version = (
subprocess.check_output(cmd_to_execute, shell=True) # noqa P204
.decode()
.strip()
)
need_to_upgrade = self._compare_current_and_package_versions(
current_version, self._fw_info[UFW_VERSION]
)
else:
logging.debug(
"=== No way to check version. Defaulting to upgrade..."
)
need_to_upgrade = True
if need_to_upgrade:
logging.debug("fw_entity needs upgrade")
else:
logging.debug("fw_entity does not need upgrade")
return need_to_upgrade
def _run_cmd_on_oob(self, cmd_to_execute: str) -> int:
"""
Run command on OOB
"""
logging.info("=== Running command : {}".format(cmd_to_execute))
if self._dryrun:
cmd_to_execute = "echo dryrun: " + cmd_to_execute
try:
subprocess.check_call(cmd_to_execute, shell=True, stderr=subprocess.STDOUT)
return 0
except subprocess.CalledProcessError as e:
logging.info("Exception {} occured when running command".format(e))
return e.returncode
# Step 4
def _upgrade_executor(self, filename: str, instance_specifier=None):
"""
upgrade executor for an entity
inputs:
instance_specifier: identify a specific instance like pim '1' etc
filename: binary that will be used for flashing
"""
logging.info(
"=== Upgrading...{} dryrun={}".format(self._fw_entity, self._dryrun)
)
instance_successful = True
cmd_to_execute = self._fw_info[UFW_CMD]
if not instance_specifier:
cmd_to_execute = cmd_to_execute.format(filename=filename)
else:
cmd_to_execute = cmd_to_execute.format(
filename=filename, entity=instance_specifier
)
return_code = self._run_cmd_on_oob(cmd_to_execute)
instance_successful = return_code == 0
if not instance_successful:
logging.info(
"=== Error occured with return code : {}".format(str(return_code))
)
return return_code, instance_successful
# Step 5
def _run_post_upgrade_action(self, item_successful: UpgradeState):
"""
Inputs:
item_successful: Status of all entities in list upgraded successfully or not
"""
if self._is_post_action_set_in_json():
if item_successful == UpgradeState.FAIL:
logging.info(
"=== Will not run post action command,"
"as one or more instances of this fw_entity failed to upgrade"
)
elif item_successful == UpgradeState.SUCCESS:
post_action = self._fw_info[UFW_POST_ACTION]
if self._dryrun:
post_action = "echo dryrun: " + post_action
logging.info(
"=== Running post action command : {}".format(post_action)
)
subprocess.check_output(post_action, shell=True) # noqa p204
else:
logging.info("=== Upgrade skipped. Will not run post action")
# ========================================================================
    # API publicly accessible for upgrading an entity
# ========================================================================
def upgrade_entity(self) -> List:
"""
        Upgrade the fw_entity described in the JSON; if the fw_entity has
        multiple entities (instances), verify and upgrade all of them.
Steps:
1) Get Instance list
2) Verify if binary file md5/sha match
3) Verify if upgrade is needed
4) Perform upgrade if:
4.1) Condition set in json passes
4.2) Lower version and needs upgrade
5) Post upgrade actions
"""
failure_list = [] # Track instance(s) failed
entity_upgrade = UpgradeState.NONE
item_priority = self._fw_info[UFW_PRIORITY]
logging.info(
"\n\n=== Upgrading fw_entity : {} (Priority : {}) ===".format(
self._fw_entity, str(item_priority)
)
)
instance_list = self._get_entities_list(self._get_entity_list_string_in_json())
filename = os.path.join(self._cwd, self._fw_info[UFW_NAME])
self._verify_item_binary(filename)
for instance in instance_list:
logging.info("\n=== Entity : {} dryrun={}".format(instance, self._dryrun))
start_time = time.time()
if (
self._is_entity_upgrade_needed(instance_specifier=instance)
or self._forced_upgrade
):
# Check if "condition" field is set. If so, check that condition
if self._is_condition_set_in_json(instance_specifier=instance):
return_code, instance_successful = self._upgrade_executor(
filename, instance_specifier=instance
)
if instance_successful:
entity_upgrade = UpgradeState.SUCCESS
else:
# Even if 1 instance failed to upgrade, deem whole entity
# failed
entity_upgrade = UpgradeState.FAIL
end_time = time.time()
msg = "<{} instance : {}>".format(self._fw_entity, instance)
failure_list.append(msg)
logging.info(
"=== Time Elapsed : {}sec".format(end_time - start_time)
)
# If stop_on_error is set, stop no matter what
# Otherwise, check if continue_on_error field is set in JSON
if self._is_continue_on_error_set_in_json():
logging.info(
"=== Error occured with return code : {}".format(
str(return_code)
)
)
logging.info(
"=== Continuing, as continue_on_error"
" is set in JSON field"
)
else:
# fail hard
raise FwUpgraderFailedUpgrade(
"Return code : {}".format(str(return_code))
)
else:
logging.info(
"=== Condition not met. Will not upgrade {} instance {}".format(
self._fw_entity, instance
)
)
end_time = time.time()
logging.info(
"=== Time Elapsed : {}sec".format(
str(int(end_time) - int(start_time))
)
)
else:
logging.info("=== Already up to date")
self._run_post_upgrade_action(entity_upgrade)
return failure_list
# =============================================================================
# APIs to check/get data from json dict
# =============================================================================
def _get_entity_list_string_in_json(self) -> Optional[List[str]]:
"""
        FW entity can optionally provide an "entities" list. When that is
        provided, honor it.
"""
return self._fw_info.get(UFW_ENTITY_INSTANCE, None)
def _is_condition_set_in_json(self, instance_specifier=None) -> bool:
if UFW_CONDITION not in self._fw_info:
return True
cmd_to_execute = self._fw_info[UFW_CONDITION]
if instance_specifier:
cmd_to_execute = cmd_to_execute.format(entity=instance_specifier)
try:
subprocess.check_output(cmd_to_execute, shell=True) # noqa p204
return True
except subprocess.CalledProcessError as e:
logging.info("Exception {} occured when running command".format(e))
return False
def _is_continue_on_error_set_in_json(self) -> bool:
if not self._stop_on_error and (
UFW_CONTINUE_ON_ERROR in self._fw_info
and self._fw_info[UFW_CONTINUE_ON_ERROR]
):
return True
return False
def _is_version_set_in_json(self) -> bool:
return UFW_GET_VERSION in self._fw_info
def _is_post_action_set_in_json(self) -> bool:
return UFW_POST_ACTION in self._fw_info
class FwUpgrader(object):
_POWER_RESET = "wedge_power.sh reset"
_POWER_RESET_HARD = "wedge_power.sh reset -s"
def __init__(
self,
json: Dict,
binarypath: str,
stop_on_error: bool = False,
dryrun: bool = False,
forced_upgrade: bool = False,
reset: ResetMode = ResetMode.NO_RESET,
):
self._ordered_json = json # type: Dict
self._stop_on_error = stop_on_error # type: bool
self._dryrun = dryrun # type: bool
self._forced_upgrade = forced_upgrade # type: bool
self._reset = reset # type: ResetMode
self._cwd = binarypath # type: str
# TODO: Return should be Union[str, List[str], None]
def _get_fw_info_for_entity(self, fw_entity: str) -> Dict[str, Any]:
if fw_entity in self._ordered_json:
return self._ordered_json[fw_entity]
else:
msg = "FW entity {} not found in JSON table!".format(fw_entity)
raise FwUpgraderMissingJsonKey(msg)
def _entity_upgrade_needed(self, fw_entity: str) -> bool:
"""
Given one entity check is firmware upgrade needed or not
"""
return FwEntityUpgrader(
fw_entity,
self._get_fw_info_for_entity(fw_entity),
binarypath=self._cwd,
stop_on_error=self._stop_on_error,
dryrun=self._dryrun,
forced_upgrade=self._forced_upgrade,
)._is_entity_upgrade_needed()
# =========================================================================
    # API publicly accessible for upgrading all entities
# =========================================================================
def is_any_upgrade_needed(self) -> bool:
for fw_entity in self._ordered_json:
if self._entity_upgrade_needed(fw_entity):
return True
return False
def run_upgrade(self) -> bool:
"""
Upgrade each upgradable fw_entity in json
"""
all_successful = True # type: bool
entity_failure_list = [] # type: List[str]
fail_list = [] # type: List[str]
for fw_entity in self._ordered_json:
# fw_entity could have multiple instances, hence fetch list
entity_failure_list = FwEntityUpgrader(
fw_entity,
self._get_fw_info_for_entity(fw_entity),
binarypath=self._cwd,
stop_on_error=self._stop_on_error,
dryrun=self._dryrun,
forced_upgrade=self._forced_upgrade,
).upgrade_entity()
if len(entity_failure_list):
all_successful = False
fail_list.extend(entity_failure_list)
if not all_successful:
raise FwUpgraderFailedUpgrade(",".join(fail_list))
return all_successful
def reboot_as_needed(self) -> None:
if self._reset == ResetMode.USERVER_RESET.value:
subprocess.check_output(self._POWER_RESET, shell=True) # noqa p204
elif self._reset == ResetMode.HARD_RESET.value:
subprocess.check_output(self._POWER_RESET_HARD, shell=True) # noqa p204
|
df5c1f39de233d43b28b296d31978582a458ec17
|
312a8fde11293cb142334a3860966ec1f75ac401
|
/timesketch/lib/analyzers/win_crash.py
|
1935fd4250eea2398db1a6072ff026cc0959b145
|
[
"Apache-2.0"
] |
permissive
|
google/timesketch
|
f0fd09062a8a24bac581d2d4286d095d667d2f10
|
24f471b58ca4a87cb053961b5f05c07a544ca7b8
|
refs/heads/master
| 2023-08-31T21:48:19.602686
| 2023-08-31T11:24:17
| 2023-08-31T11:24:17
| 21,009,909
| 2,263
| 647
|
Apache-2.0
| 2023-09-14T14:08:07
| 2014-06-19T17:49:45
|
Python
|
UTF-8
|
Python
| false
| false
| 6,653
|
py
|
win_crash.py
|
"""Sketch analyzer plugin for Windows crash artefacts."""
from __future__ import unicode_literals
import re
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
class WinCrashSketchPlugin(interface.BaseAnalyzer):
"""Analyzer for Windows application crashes."""
NAME = "win_crash"
DISPLAY_NAME = "Windows application crashes"
DESCRIPTION = "Detect Windows application crashes"
DEPENDENCIES = frozenset()
FILENAME_REGEX = re.compile(
r"(?:\\|\/)(?:AppCrash|Critical|NonCritical)_(.+?\.exe)_[a-f0-9]{16,}"
r"|\'([^\']+\.exe)\'",
re.IGNORECASE,
)
QUERY_ELEMENTS = {
"Event - App Error": (
'data_type:"windows:evtx:record"',
'source_name:"Application Error"',
'event_identifier:"1000"',
'event_level:"2"', # Level: Error
),
"Event - WER": (
'data_type:"windows:evtx:record"',
'source_name:"Windows Error Reporting"',
'event_identifier:"1001"',
'event_level:"4"', # Level: Info
),
"Event - BSOD": (
'data_type:"windows:evtx:record"',
'source_name:"Microsoft-Windows-WER-SystemErrorReporting"',
'event_identifier:"1001"',
'event_level:"2"', # Level: Error
),
"Event - App Hang": (
'data_type:"windows:evtx:record"',
'source_name:"Application Error"',
'event_identifier:"1002"',
'event_level:"2"', # Level: Error
),
"Event - EMET Warning": (
'data_type:"windows:evtx:record"',
'source_name:"EMET"',
'event_identifier:"1"',
'event_level:"3"', # Level: Warning
),
"Event - EMET Error": (
'data_type:"windows:evtx:record"',
'source_name:"EMET"',
'event_identifier:"1"',
'event_level:"2"', # Level: Error
),
"Event - .NET App Crash": (
'data_type:"windows:evtx:record"',
'source_name:".NET Runtime"',
'event_identifier:"1026"',
'event_level:"2"', # Level: Error
),
"File - WER Report": (
'data_type:"fs:stat"',
'filename:"/Microsoft/Windows/WER/"',
"filename:/((Non)?Critical|AppCrash)_.*/",
'file_entry_type:("directory" or "2")',
),
"Registry - Crash Reporting": (
'data_type:"windows:registry:key_value"',
r'key_path:"\\Control\\CrashControl"',
'values:("LogEvent: REG_DWORD_LE 0"'
+ ' OR "SendAlert: REG_DWORD_LE 0"'
+ ' OR "CrashDumpEnabled: REG_DWORD_LE 0")',
),
"Registry - Error Reporting": (
'data_type:"windows:registry:key_value"',
r'key_path:"\\Software\\Microsoft\\PCHealth\\ErrorReporting"',
'values:("DoReport: REG_DWORD_LE 0" OR "ShowUI: REG_DWORD_LE 0")',
),
}
def formulate_query(self, elements):
"""Generates the OpenSearch query.
Args:
elements: Dictionary with a list of conditions
Returns:
The OpenSearch query
"""
conditions = list()
for element_list in elements.values():
conditions += ["({0})".format(" AND ".join(element_list))]
return " OR ".join(conditions)
def extract_filename(self, text):
"""Finds filenames of crashed applications using a regular expression.
Args:
text: String that might contain the filename
Returns:
The string with filename if found
"""
if ".exe" in str(text or "").lower():
match = self.FILENAME_REGEX.search(text)
if match:
# The regex can match on full file paths and filenames,
# so only return the filename.
return min([m for m in match.groups() if m])
return ""
def mark_as_crash(self, event, filename):
"""Mark entries with crash artefacts.
Args:
event: OpenSearch event
filename: Application that crashed
"""
if filename:
event.add_attributes({"crash_app": filename})
event.add_tags(["win_crash"])
def run(self):
"""Entry point for the analyzer.
Returns:
String with summary of the analyzer result
"""
query = self.formulate_query(self.QUERY_ELEMENTS)
return_fields = ["data_type", "message", "filename"]
# Generator of events based on your query.
events = self.event_stream(query_string=query, return_fields=return_fields)
# Container for filenames of crashed applications.
filenames = set()
for event in events:
data_type = event.source.get("data_type")
event_text = None
# Tag entries that show the crash reporting has been disabled.
if data_type == "windows:registry:key_value":
event.add_comment(
"WARNING: The crash reporting was disabled. "
"It could be indicative of attacker activity. "
"For details refer to page 16 from "
"https://assets.documentcloud.org/documents/3461560/"
"Google-Aquarium-Clean.pdf."
)
event.commit()
continue
# Search event log entries for filenames of crashed applications.
if data_type == "windows:evtx:record":
event_text = event.source.get("message")
# Search file system entries for filenames of crashed applications.
elif data_type == "fs:stat":
event_text = event.source.get("filename")
            # If a filename was found, tag the entry as crash-related
filename = self.extract_filename(event_text)
if filename:
self.mark_as_crash(event, filename)
filenames.add(filename)
event.commit()
# Create a saved view with our query.
if filenames:
self.sketch.add_view(
"Windows Crash activity", "win_crash", query_string='tag:"win_crash"'
)
return (
"Windows Crash analyzer completed, "
+ "{0:d} crashed application{1:s} identified: {2:s}".format(
len(filenames), "s" if len(filenames) > 1 else "", ", ".join(filenames)
)
)
manager.AnalysisManager.register_analyzer(WinCrashSketchPlugin)
|
d3c458e9724c4cfb7501ceb5193fd880c4195661
|
9e5752ec6fa4f9797dd06f49e9d26dba55b05975
|
/mindarmour/privacy/evaluation/_check_config.py
|
61b11e189508627fc5b1eb38c99a8cf11756abc3
|
[
"Apache-2.0"
] |
permissive
|
mindspore-ai/mindarmour
|
8e0d221d4cc77ebf2ce67dbcdf8d2cb8175d7051
|
9cd825b416916c9cda5a7f3623b39b086d16275c
|
refs/heads/master
| 2023-07-09T11:43:45.380811
| 2023-07-07T07:35:27
| 2023-07-07T07:35:27
| 250,692,967
| 151
| 16
|
Apache-2.0
| 2020-04-02T09:50:15
| 2020-03-28T01:59:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,143
|
py
|
_check_config.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Verify attack config
"""
import numpy as np
from mindarmour.utils._check_param import check_param_type
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = "check_config"
def _is_positive_int(item):
"""Verify that the value is a positive integer."""
if not isinstance(item, int):
return False
return item > 0
def _is_non_negative_int(item):
"""Verify that the value is a non-negative integer."""
if not isinstance(item, int):
return False
return item >= 0
def _is_positive_float(item):
"""Verify that value is a positive number."""
if not isinstance(item, (int, float)):
return False
return item > 0
def _is_non_negative_float(item):
"""Verify that value is a non-negative number."""
if not isinstance(item, (int, float)):
return False
return item >= 0
def _is_range_0_1_float(item):
if not isinstance(item, (int, float)):
return False
return 0 <= item < 1
def _is_positive_int_tuple(item):
"""Verify that the input parameter is a positive integer tuple."""
if not isinstance(item, tuple):
return False
for i in item:
if not _is_positive_int(i):
return False
return True
def _is_dict(item):
"""Check whether the type is dict."""
return isinstance(item, dict)
def _is_list(item):
"""Check whether the type is list"""
return isinstance(item, list)
def _is_str(item):
"""Check whether the type is str."""
return isinstance(item, str)
_VALID_CONFIG_CHECKLIST = {
"knn": {
"n_neighbors": [_is_positive_int],
"weights": [{"uniform", "distance"}, callable],
"algorithm": [{"auto", "ball_tree", "kd_tree", "brute"}],
"leaf_size": [_is_positive_int],
"p": [_is_positive_int],
"metric": [_is_str, callable],
"metric_params": [_is_dict, {None}]
},
"lr": {
"penalty": [{"l1", "l2", "elasticnet", "none"}],
"dual": [{True, False}],
"tol": [_is_positive_float],
"C": [_is_positive_float],
"fit_intercept": [{True, False}],
"intercept_scaling": [_is_positive_float],
"class_weight": [{"balanced", None}, _is_dict],
"random_state": None,
"solver": [{"newton-cg", "lbfgs", "liblinear", "sag", "saga"}]
},
"mlp": {
"hidden_layer_sizes": [_is_positive_int_tuple],
"activation": [{"identity", "logistic", "tanh", "relu"}],
"solver": [{"lbfgs", "sgd", "adam"}],
"alpha": [_is_positive_float],
"batch_size": [{"auto"}, _is_positive_int],
"learning_rate": [{"constant", "invscaling", "adaptive"}],
"learning_rate_init": [_is_positive_float],
"power_t": [_is_positive_float],
"max_iter": [_is_positive_int],
"shuffle": [{True, False}],
"random_state": None,
"tol": [_is_positive_float],
"verbose": [{True, False}],
"warm_start": [{True, False}],
"momentum": [_is_positive_float],
"nesterovs_momentum": [{True, False}],
"early_stopping": [{True, False}],
"validation_fraction": [_is_range_0_1_float],
"beta_1": [_is_range_0_1_float],
"beta_2": [_is_range_0_1_float],
"epsilon": [_is_positive_float],
"n_iter_no_change": [_is_positive_int],
"max_fun": [_is_positive_int]
},
"rf": {
"n_estimators": [_is_positive_int],
"criterion": [{"gini", "entropy"}],
"max_depth": [{None}, _is_positive_int],
"min_samples_split": [_is_positive_float],
"min_samples_leaf": [_is_positive_float],
"min_weight_fraction_leaf": [_is_non_negative_float],
"max_features": [{"auto", "sqrt", "log2", None}, _is_positive_float],
"max_leaf_nodes": [_is_positive_int, {None}],
"min_impurity_decrease": [_is_non_negative_float],
"min_impurity_split": [{None}, _is_positive_float],
"bootstrap": [{True, False}],
"n_jobs": [_is_positive_int, {None}],
"random_state": None,
"verbose": [_is_non_negative_int],
"warm_start": [{True, False}],
"class_weight": [{"balanced", "balanced_subsample"}, _is_dict, _is_list],
"ccp_alpha": [_is_non_negative_float],
"max_samples": [{None}, _is_positive_int, _is_range_0_1_float]
}
}
def _check_config(attack_config, config_checklist):
"""
    Verify that attack_config is valid.
    config_checklist gives the accepted values/ranges for each parameter.
"""
for config in attack_config:
check_param_type("config", config, dict)
if set(config.keys()) != {"params", "method"}:
msg = "Keys of each config in attack_config must be {}," \
"but got {}.".format({'method', 'params'}, set(config.keys()))
LOGGER.error(TAG, msg)
raise KeyError(msg)
method = str.lower(config["method"])
params = config["params"]
if method not in config_checklist.keys():
msg = "Method {} is not supported.".format(method)
LOGGER.error(TAG, msg)
raise NameError(msg)
if not params.keys() <= config_checklist[method].keys():
msg = "Params in method {} is not accepted, the parameters " \
"that can be set are {}.".format(method, set(config_checklist[method].keys()))
LOGGER.error(TAG, msg)
raise KeyError(msg)
for param_key in params.keys():
param_value = params[param_key]
candidate_values = config_checklist[method][param_key]
check_param_type('param_value', param_value, (list, tuple, np.ndarray))
if candidate_values is None:
continue
for item_value in param_value:
flag = False
for candidate_value in candidate_values:
if isinstance(candidate_value, set) and item_value in candidate_value:
flag = True
break
elif not isinstance(candidate_value, set) and candidate_value(item_value):
flag = True
break
if not flag:
msg = "Setting of parameter {} in method {} is invalid".format(param_key, method)
raise ValueError(msg)
def verify_config_params(attack_config):
"""
External interfaces to verify attack config.
"""
_check_config(attack_config, _VALID_CONFIG_CHECKLIST)
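def _example_attack_config():
    # Editor's illustrative sketch (not part of the original module): a config
    # accepted by verify_config_params is a list of {"method", "params"} dicts
    # whose parameter values are lists/tuples/ndarrays of candidates allowed by
    # _VALID_CONFIG_CHECKLIST above.
    verify_config_params([
        {"method": "knn", "params": {"n_neighbors": [3, 5], "weights": ["uniform"]}},
        {"method": "lr", "params": {"C": [0.1, 1.0], "solver": ["lbfgs"]}},
    ])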
|
d9696d199827053954226737109bf68f853117ff
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/桥和割点/1568. 使陆地分离的最少天数.py
|
db58a35fd84a884fc557806120150e9bbb0d53e0
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
1568. 使陆地分离的最少天数.py
|
# Three possible answers: 0, 1 or 2. Count the islands (union-find or DFS); if the count
# is not exactly 1, return 0. With exactly one island, look for an articulation point
# (Tarjan's algorithm); if one exists return 1, otherwise return 2.
from typing import List
import copy
# Day 0: if the grid already has zero islands or more than one island, return 0.
# Day 1: otherwise, try turning each land cell into water and check whether that disconnects the grid.
# Day 2: else, the answer is always 2.
class Solution:
    # sink this connected component of land (flood fill with 0s)
def no_islands_recur(self, grid, i, j, m, n):
if grid[i][j] == 0:
return
grid[i][j] = 0
if i - 1 >= 0:
self.no_islands_recur(grid, i - 1, j, m, n)
if i + 1 < m:
self.no_islands_recur(grid, i + 1, j, m, n)
if j - 1 >= 0:
self.no_islands_recur(grid, i, j - 1, m, n)
if j + 1 < n:
self.no_islands_recur(grid, i, j + 1, m, n)
    # DFS to count connected components (islands)
def no_islands(self, grid):
res = 0
m, n = len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
res += 1
self.no_islands_recur(grid, i, j, m, n)
return res
def minDays(self, grid: List[List[int]]) -> int:
# if we have 0 or more than 1 islands at day 0, return day 0
time = 0
grid_copy = copy.deepcopy(grid)
n = self.no_islands(grid_copy)
if n != 1:
return time
        # try removing each land cell and see if the grid becomes disconnected
        # this step could instead find an articulation point with Tarjan's algorithm
time = 1
for i in range(len(grid)):
for j in range(len(grid[0])):
grid_copy = copy.deepcopy(grid)
grid_copy[i][j] = 0
n = self.no_islands(grid_copy)
if n != 1:
return time
# well then just return 2
time = 2
return time
print(Solution().minDays(grid=[[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]))
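# Hypothetical extra checks (not in the original solution) illustrating the three outcomes
# described in the comments above: already disconnected -> 0, a removable single cell -> 1,
# otherwise -> 2.
print(Solution().minDays(grid=[[1, 0], [0, 1]]))  # two islands already -> 0
print(Solution().minDays(grid=[[0, 1, 0]]))       # removing the only land cell -> 1
print(Solution().minDays(grid=[[1, 1]]))          # no single cell disconnects it -> 2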
|
d4f74967da773ffbad85c7556d1b78f1bcbee1f0
|
91d36c3a8692d724ad02d345fed732d52baa3f55
|
/TF1/transform.py
|
d38a7d855e51490d03665d0124b64eabc391af2b
|
[
"Apache-2.0"
] |
permissive
|
ppwwyyxx/dash-docset-tensorflow
|
79446b7a836edc26252753ae60e39ed164972dc9
|
131726ce5c924da7d234e15887a8af953dfe3b2f
|
refs/heads/master
| 2022-11-05T15:06:42.323084
| 2022-10-11T07:32:50
| 2022-10-11T07:32:50
| 48,576,620
| 218
| 32
|
Apache-2.0
| 2018-11-21T15:47:50
| 2015-12-25T10:15:44
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,728
|
py
|
transform.py
|
#!/usr/bin/env python
# File: transform.py
import sys
import os
import pathlib
import magic
import multiprocessing as mp
import time
from urllib.parse import urlparse
import bs4
import tqdm
import pygments
import pygments.lexers
import pygments.formatters
from selectolax.parser import HTMLParser
LEXER = pygments.lexers.get_lexer_by_name('python', stripall=True)
FORMATTER = pygments.formatters.HtmlFormatter()
def _read(fname):
if 'gzip compressed' in magic.from_file(fname):
import gzip
f = gzip.open(fname)
else:
f = open(fname, 'rb')
html = f.read().decode('utf-8')
f.close()
return html
def _get_level(fname):
dirname = os.path.dirname(fname)
cnt = 0
while not os.path.isfile(os.path.join(dirname, 'main.css')):
dirname = os.path.join(dirname, '..')
cnt += 1
return cnt
def process(fname):
if not fname.endswith('.html'):
return
html = _read(fname)
level = _get_level(fname)
IGNORE = [
'header', 'footer', 'devsite-book-nav', 'nav',
'devsite-header', 'devsite-toc', 'devsite-content-footer',
'devsite-page-rating', 'script'
]
tree = HTMLParser(html)
tree.strip_tags(IGNORE)
for node in tree.css("div.devsite-article-meta"):
node.decompose()
# remove the TF2 button
buttons = tree.css_first("table.tfo-notebook-buttons")
if buttons:
for node in buttons.css("td"):
if "TensorFlow 2" in node.text():
node.decompose()
break
# point to the new css
allcss = tree.css("link[rel='stylesheet']")
if allcss:
css = allcss[0]
css.attrs['href'] = ''.join(['../'] * level) + 'main.css'
for k in allcss[1:]:
k.decompose()
# add method/class declarations
title_node = tree.css_first("h1.devsite-page-title")
if title_node:
# mark method
method_node = tree.css_first('h2#methods')
if method_node:
# print("Find class:", title)
title_node.attrs['class'] = 'dash-class'
title = title_node.text().strip()
children = list(method_node.parent.iter())
for method_idx, node in enumerate(children):
if node.attrs.get('id') == 'methods':
break
for k in range(method_idx, len(children) - 2):
if children[k].tag == 'h3' and children[k + 2].tag == 'pre':
# is a method:
children[k].attrs['class'] = 'dash-method'
# print("Find method ", children[k].text())
name_node = children[k].child.child
name_node.replace_with(title + "." + name_node.text())
else:
title_node.attrs['class'] = 'dash-function'
# Change all self-referential links to relative
ROOT = './www.tensorflow.org/versions/r1.15/' # change it when version is changed
ANCHOR = '/api_docs/python'
for link in tree.css('a'):
href = link.attrs.get('href', '')
href = urlparse(href).path
if ANCHOR in href:
prefix_url = href.find(ANCHOR)
link_fname = os.path.join(ROOT, href[prefix_url + 1:])
if not os.path.isfile(link_fname):
link_fname += ".html"
if os.path.isfile(link_fname):
relpath = os.path.relpath(link_fname, start=os.path.dirname(fname))
link.attrs['href'] = relpath
soup = bs4.BeautifulSoup(tree.html, 'lxml')
for pycode in soup.findAll('pre', attrs={"class": "lang-python"}):
code = pycode.code.text
code = pygments.highlight(code, LEXER, FORMATTER)
# https://github.com/rushter/selectolax/issues/26
pycode.replaceWith(bs4.BeautifulSoup(code, 'lxml'))
MATHJAX = """
<script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
"""
# mathjax only works with internet
head = soup.findAll('head')[0]
mathjax = bs4.BeautifulSoup(MATHJAX, 'lxml').findAll('script')
head.extend(mathjax)
with open(fname, 'w') as f:
f.write(str(soup))
if __name__ == '__main__':
path = os.path.abspath(sys.argv[1])
if os.path.isfile(path):
process(path)
elif os.path.isdir(path):
files = pathlib.Path(path).glob("**/*.html")
files = [os.fspath(x) for x in files]
pool = mp.Pool(int(os.cpu_count() * 1.5))
for _ in tqdm.tqdm(
pool.imap_unordered(process, files, chunksize=20),
total=len(files)):
pass
pool.close()
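
def _demo_relative_link():
    """Hypothetical helper (not part of the original script) showing the relative-link
    rewriting that process() performs: an on-disk target under ROOT is rewritten
    relative to the directory of the page that links to it. The file names used
    here are illustrative only."""
    root = './www.tensorflow.org/versions/r1.15/'
    page = os.path.join(root, 'api_docs/python/tf/keras.html')    # page being processed
    target = os.path.join(root, 'api_docs/python/tf/nn.html')     # linked page on disk
    return os.path.relpath(target, start=os.path.dirname(page))   # -> 'nn.html'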
|
64aa687e48dd27ecea5ca0af3d446c0f5a219d91
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/Getting_Started_With_Raspberry_Pi_Pico/indefinite_loop/code.py
|
52b7f86f0b465da59ce3d7bc6a572f7bf83eda9e
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Example of infinite loop. Final print statement is never reached."""
print("Loop starting!")
while True:
print("Loop running!")
print("Loop finished!")
|
6d888bea1af68955b3643c0ae7bef4fa351b64e3
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/sharepoint/userprofiles/personal_cache.py
|
0f69e91c16c63f4be81997bf5ed214ec47ef17aa
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
personal_cache.py
|
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.sharepoint.base_entity import BaseEntity
class PersonalCache(BaseEntity):
"""Per user cache of key/value pairs organized by folders. Personal cache MAY be used for optimizing initial
    load performance of the protocol client, if obtaining the initial set of data from the personal cache is faster than
requesting the data from the server."""
def __init__(self, context):
super(PersonalCache, self).__init__(context, ResourcePath("SP.UserProfiles.PersonalCache"))
def dispose(self):
"""
"""
qry = ServiceOperationQuery(self, "Dispose")
self.context.add_query(qry)
return self
@property
def cache_name(self):
"""
:rtype: str or None
"""
return self.properties.get("CacheName", None)
@property
def entity_type_name(self):
return "SP.UserProfiles.PersonalCache"
|
68740385193e0bba434720793bf1b34aafffe546
|
a61bf859ceeb1ba98de3863225e07b29e1d7ce8a
|
/thonny/plugins/circuitpython/api_stubs/msgpack/__init__.pyi
|
9847e1a919935df1fbd0c649d21359318ef65ad0
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
thonny/thonny
|
3974b1860703e8450b837863682117f525a886c6
|
8fc9f5c7cbbe1d1c82aa5503ec4b684e28aa608c
|
refs/heads/master
| 2023-08-31T03:04:34.685140
| 2023-08-24T11:38:36
| 2023-08-24T11:38:36
| 163,728,962
| 2,788
| 1,048
|
MIT
| 2023-08-10T18:59:37
| 2019-01-01T10:29:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,897
|
pyi
|
__init__.pyi
|
"""Pack object in msgpack format
The msgpack format is similar to json, except that the encoded data is binary.
See https://msgpack.org for details. The module implements a subset of the cpython
module msgpack-python.
Not implemented: 64-bit int, uint, float.
For more information about working with msgpack,
see `the CPython Library Documentation <https://msgpack-python.readthedocs.io/en/latest/?badge=latest>`_.
Example 1::
import msgpack
from io import BytesIO
b = BytesIO()
msgpack.pack({'list': [True, False, None, 1, 3.14], 'str': 'blah'}, b)
b.seek(0)
print(msgpack.unpack(b))
Example 2: handling objects::
from msgpack import pack, unpack, ExtType
from io import BytesIO
class MyClass:
def __init__(self, val):
self.value = val
def __str__(self):
return str(self.value)
data = MyClass(b'my_value')
def encoder(obj):
if isinstance(obj, MyClass):
return ExtType(1, obj.value)
return f"no encoder for {obj}"
def decoder(code, data):
if code == 1:
return MyClass(data)
return f"no decoder for type {code}"
buffer = BytesIO()
pack(data, buffer, default=encoder)
buffer.seek(0)
decoded = unpack(buffer, ext_hook=decoder)
print(f"{data} -> {buffer.getvalue()} -> {decoded}")
"""
from __future__ import annotations
from typing import Callable, Union
import circuitpython_typing
def pack(
obj: object,
stream: circuitpython_typing.ByteStream,
*,
default: Union[Callable[[object], None], None] = None,
) -> None:
"""Output object to stream in msgpack format.
:param object obj: Object to convert to msgpack format.
:param ~circuitpython_typing.ByteStream stream: stream to write to
:param Optional[~circuitpython_typing.Callable[[object], None]] default:
function called for python objects that do not have
a representation in msgpack format.
"""
...
def unpack(
stream: circuitpython_typing.ByteStream,
*,
ext_hook: Union[Callable[[int, bytes], object], None] = None,
use_list: bool = True,
) -> object:
"""Unpack and return one object from stream.
:param ~circuitpython_typing.ByteStream stream: stream to read from
:param Optional[~circuitpython_typing.Callable[[int, bytes], object]] ext_hook: function called for objects in
msgpack ext format.
:param Optional[bool] use_list: return array as list or tuple (use_list=False).
:return object: object read from stream.
"""
...
class ExtType:
"""ExtType represents ext type in msgpack."""
def __init__(self, code: int, data: bytes) -> None:
"""Constructor
:param int code: type code in range 0~127.
:param bytes data: representation."""
code: int
"""The type code, in range 0~127."""
...
data: bytes
"""Data."""
...
|
ae92b5758f999c19e4993cc84e75e34e0b4631e6
|
2489a9ea2c9b59cc86f2dd0c6efb6b65370b9f6b
|
/clusterman/aws/auto_scaling_resource_group.py
|
2966ac830a9cdf0fe7b0c27ca224b3e0cfd5dbc2
|
[
"Apache-2.0"
] |
permissive
|
Yelp/clusterman
|
744e83fa2b7feb14782b358af8dfb8554da7e8e5
|
6c4b8bb424fd84f1087552fb19d992180cf17834
|
refs/heads/master
| 2023-08-29T03:15:30.865547
| 2023-08-17T09:34:38
| 2023-08-17T09:34:38
| 221,807,101
| 310
| 24
|
Apache-2.0
| 2023-08-30T14:03:24
| 2019-11-14T23:49:29
|
Python
|
UTF-8
|
Python
| false
| false
| 13,645
|
py
|
auto_scaling_resource_group.py
|
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
import colorlog
from cachetools.func import ttl_cache
from clusterman.aws.aws_resource_group import AWSResourceGroup
from clusterman.aws.aws_resource_group import RESOURCE_GROUP_CACHE_SECONDS
from clusterman.aws.client import autoscaling
from clusterman.aws.client import ec2
from clusterman.aws.markets import InstanceMarket
from clusterman.aws.response_types import AutoScalingGroupConfig
from clusterman.aws.response_types import InstanceOverrideConfig
from clusterman.aws.response_types import LaunchTemplateConfig
from clusterman.exceptions import NoLaunchTemplateConfiguredError
from clusterman.interfaces.types import AgentMetadata
from clusterman.interfaces.types import ClusterNodeMetadata
from clusterman.interfaces.types import InstanceMetadata
from clusterman.util import ClustermanResources
_BATCH_MODIFY_SIZE = 200
CLUSTERMAN_STALE_TAG = "clusterman:is_stale"
logger = colorlog.getLogger(__name__)
class AutoScalingResourceGroup(AWSResourceGroup):
"""
Wrapper for AWS Auto Scaling Groups (ASGs)
.. note:: ASGs track their size in terms of number of instances, meaning that two
ASGs with different instance types can have the same capacity but very
different quantities of resources.
.. note:: Clusterman controls which instances to terminate in the event of scale
in. As a result, ASGs must be set to protect instances from scale in, and
AutoScalingResourceGroup will assume that instances are indeed protected.
"""
FRIENDLY_NAME = "asg"
def market_weight(self, market: InstanceMarket) -> float:
"""Returns the weight of a given market
ASGs can be defined with different instance weights. If we can find
the weight for a given instance type, we return it. Otherwise we
default to 1.
:param market: The market for which we want the weight for
:returns: The weight of a given market
"""
if market.az in self._group_config["AvailabilityZones"]:
for instance in self._group_config.get("Instances", []):
if market.instance == instance.get("InstanceType"):
return int(instance.get("WeightedCapacity", "1"))
return 1
else:
return 0
def mark_stale(self, dry_run: bool) -> None:
for i in range(0, len(self.instance_ids), _BATCH_MODIFY_SIZE):
inst_list = self.instance_ids[i : i + _BATCH_MODIFY_SIZE]
logger.info(f"Setting staleness tags for {inst_list}")
if dry_run:
continue
ec2.create_tags(
Resources=inst_list,
Tags=[
{
"Key": CLUSTERMAN_STALE_TAG,
"Value": "True",
}
],
)
def modify_target_capacity(
self,
target_capacity: float,
*,
dry_run: bool = False,
honor_cooldown: bool = False,
) -> None:
"""Modify the desired capacity for the ASG.
        :param target_capacity: The new desired number of instances in the ASG.
Must be such that the desired capacity is between the minimum and
maximum capacities of the ASGs. The desired capacity will be rounded
to the minimum or maximum otherwise, whichever is closer.
:param dry_run: Boolean indicating whether or not to take action or just
log
:param honor_cooldown: Boolean for whether or not to wait for a period
of time (cooldown, set in ASG config) after the previous scaling
activity has completed before initiating this one. Defaults to False,
which is the AWS default for manual scaling activities.
"""
# We pretend like stale instances aren't in the ASG, but actually they are so
# we have to double-count them in the target capacity computation
target_capacity += self._stale_capacity
        # Round target_capacity to min or max if necessary
if target_capacity > self.max_capacity:
logger.warning(
f"New target_capacity={target_capacity} exceeds ASG MaxSize={self.max_capacity}, "
"setting to max instead"
)
target_capacity = self.max_capacity
elif target_capacity < self.min_capacity:
logger.warning(
f"New target_capacity={target_capacity} falls below ASG MinSize={self.min_capacity}, "
"setting to min instead"
)
target_capacity = self.min_capacity
kwargs = dict(
AutoScalingGroupName=self.group_id,
DesiredCapacity=int(target_capacity),
HonorCooldown=honor_cooldown,
)
logger.info("Setting target capacity for ASG with arguments:\n" f"{pprint.pformat(kwargs)}")
if dry_run:
return
autoscaling.set_desired_capacity(**kwargs)
def scale_up_options(self) -> Iterable[ClusterNodeMetadata]:
if not self._launch_template_config:
raise NoLaunchTemplateConfiguredError(
f"ASG {self.id} has no launch template associated with it; unable to generate scaling options",
)
# Either there is a list of LaunchTemplate overrides, or this ASG uses a single instance type
options: List[ClusterNodeMetadata] = []
for override in self._launch_template_overrides:
options.extend(
self._get_options_for_instance_type(
override["InstanceType"],
float(override["WeightedCapacity"]),
)
)
# If no overrides were specified, we just use the "default" instance type here
if not options:
options.extend(
self._get_options_for_instance_type(
self._launch_template_config["LaunchTemplateData"]["InstanceType"],
)
)
return options
def scale_down_options(self) -> Iterable[ClusterNodeMetadata]:
"""Generate each of the options for scaling down this resource group, i.e. the list of instance types currently
running in this resource group.
"""
raise NotImplementedError()
def _reload_resource_group(self):
self._group_config = self._get_auto_scaling_group_config()
(
self._launch_template_config,
self._launch_template_overrides,
) = self._get_launch_template_and_overrides()
self._stale_instance_ids = self._get_stale_instance_ids()
def _get_auto_scaling_group_config(self) -> AutoScalingGroupConfig:
if self._aws_api_cache_bucket and self._aws_api_cache_key:
try:
cache_data = self.get_aws_api_cache_data(self._aws_api_cache_bucket, self._aws_api_cache_key)
return cache_data[self.group_id]
except Exception as e:
logger.warning(f"Loading ASG data from AWS API cache failed, falling back to querying APIs: {e}")
response = autoscaling.describe_auto_scaling_groups(
AutoScalingGroupNames=[self.group_id],
)
return response["AutoScalingGroups"][0]
def _get_launch_template_and_overrides(
self,
) -> Tuple[Optional[LaunchTemplateConfig], List[InstanceOverrideConfig]]:
if "LaunchTemplate" in self._group_config:
template = self._group_config["LaunchTemplate"]
overrides: List[InstanceOverrideConfig] = []
elif "MixedInstancesPolicy" in self._group_config:
policy = self._group_config["MixedInstancesPolicy"]
template = policy["LaunchTemplate"]["LaunchTemplateSpecification"]
overrides = policy["LaunchTemplate"]["Overrides"]
else:
logger.warning(f"ASG {self.id} is not using LaunchTemplates, it will be unable to do smart scheduling")
return None, []
launch_template_name = template["LaunchTemplateName"]
launch_template_version = template["Version"]
response = ec2.describe_launch_template_versions(
LaunchTemplateName=launch_template_name,
Versions=[launch_template_version],
)
return response["LaunchTemplateVersions"][0], overrides
def _get_stale_instance_ids(self) -> List[str]:
response = ec2.describe_tags(
Filters=[
{
"Name": "key",
"Values": [CLUSTERMAN_STALE_TAG],
},
{
"Name": "value",
"Values": ["True"],
},
]
)
return [item["ResourceId"] for item in response.get("Tags", []) if item["ResourceId"] in self.instance_ids]
def _get_options_for_instance_type(
self,
instance_type: str,
weight: Optional[float] = None,
) -> List[ClusterNodeMetadata]:
"""Generate a list of possible ClusterNode types that could be added to this ASG,
given a particular instance type"""
options = []
az_options = self._group_config["AvailabilityZones"]
for az in az_options:
instance_market = InstanceMarket(instance_type, az)
weight = weight or self.market_weight(instance_market)
options.append(
ClusterNodeMetadata(
agent=AgentMetadata(total_resources=ClustermanResources.from_instance_type(instance_type)),
instance=InstanceMetadata(market=instance_market, weight=weight),
)
)
return options
@property
def min_capacity(self) -> int:
return self._group_config["MinSize"]
@property
def max_capacity(self) -> int:
return self._group_config["MaxSize"]
@property
def instance_ids(self) -> Sequence[str]:
return [inst["InstanceId"] for inst in self._group_config.get("Instances", []) if inst is not None]
@property
def stale_instance_ids(self) -> Sequence[str]:
return self._stale_instance_ids
@property
def fulfilled_capacity(self) -> float:
return sum([int(instance.get("WeightedCapacity", "1")) for instance in self._group_config.get("Instances", [])])
@property
def status(self) -> str:
"""The status of the ASG
If all the instances are stale, then the ASG is 'stale'; otherwise, if only some instances
are stale, it is 'rolling', and otherwise it is 'active'.
"""
if len(self.stale_instance_ids) > 0:
return "rolling"
else:
return "active"
@property
def is_stale(self) -> bool:
"""Whether or not the ASG is stale
An ASG is never stale; even if all the instances in it are stale, that means we still
want Clusterman to track the existence of this specific ASG and replace the instances in it.
Staleness by definition means the resource group should go away after we clean it up.
"""
return False
@property
def _target_capacity(self) -> float:
# We pretend like stale instances aren't in the ASG, but actually they are so
# we have to remove them manually from the existing target capacity
return self._group_config["DesiredCapacity"] - self._stale_capacity
@classmethod
@ttl_cache(ttl=RESOURCE_GROUP_CACHE_SECONDS)
def _get_resource_group_tags(cls, filter_tag: str = "") -> Mapping[str, Mapping[str, str]]:
"""Retrieves the tags for each ASG"""
asg_id_to_tags = {}
if filter_tag:
filter = [
{
"Name": "key",
"Values": [
filter_tag,
],
},
]
"""If the filter tag is present switch to desribe-tag method for server side
filtering for performance reasons
"""
for page in autoscaling.get_paginator("describe_tags").paginate(Filters=filter):
for instance in page["Tags"]:
asg_id_to_tags[instance["ResourceId"]] = {filter_tag: instance["Value"]}
return asg_id_to_tags
for page in autoscaling.get_paginator("describe_auto_scaling_groups").paginate():
for asg in page["AutoScalingGroups"]:
tags_dict = {tag["Key"]: tag["Value"] for tag in asg["Tags"]}
asg_id_to_tags[asg["AutoScalingGroupName"]] = tags_dict
return asg_id_to_tags
@property
def _stale_capacity(self) -> float:
return sum(
[
int(instance.get("WeightedCapacity", "1"))
for instance in self._group_config.get("Instances", [])
if instance["InstanceId"] in self.stale_instance_ids
]
)
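
def _desired_capacity_sketch(non_stale_target, stale_capacity, min_size, max_size):
    """Minimal standalone sketch (not used by the class above) of the stale-capacity
    bookkeeping in modify_target_capacity()/_target_capacity: callers reason about
    non-stale capacity, the stale weight is added back, and the result is clamped
    to the ASG's MinSize/MaxSize before being sent to AWS."""
    desired = non_stale_target + stale_capacity
    return int(min(max(desired, min_size), max_size))
# e.g. _desired_capacity_sketch(10, 3, 1, 12) == 12  (clamped to MaxSize)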
|
e49ed641a1126c4f13fe9f8c83a2f20cd9c5904e
|
83dbdecb561d91bcf584ab4d998311711317908c
|
/toolkit/mais/training/multiclass/experiments/multi_mixed_mrl_nonan.py
|
d0cc8c98c249b298dd08db54496828ab1911a3de
|
[
"CC-BY-4.0",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
petrobras/3W
|
05baca38fdb791b4064eab5e14c1734adeb7acbd
|
3310ef4f81d5ac6ad2c3107b8554051882b46f8d
|
refs/heads/main
| 2023-07-10T13:08:38.661966
| 2023-06-30T21:49:14
| 2023-06-30T21:49:14
| 478,734,100
| 211
| 39
|
Apache-2.0
| 2023-06-30T21:49:16
| 2022-04-06T21:34:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
multi_mixed_mrl_nonan.py
|
""" definitions for experiment 3 module
Multiclass classification
Wavelet features
Most-Recent Label strategy
Drop windows with NaN label
"""
import numpy as np
from sklearn.metrics import accuracy_score, get_scorer
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from processing.feature_mappers import (
TorchWaveletFeatureMapper,
TorchStatisticalFeatureMapper,
MixedMapper,
)
from processing.label_mappers import TorchMulticlassMRLStrategy
from .base_experiment import BaseExperiment
from dataset.dataset import MAEDataset
MAX_LEVEL = 10
def sample(trial, *args, **kwargs):
return Experiment(
level=trial.suggest_int("level", 4, MAX_LEVEL, step=1),
stride=trial.suggest_int("stride", 10, 10),
n_components=trial.suggest_float("n_components", 0.9, 1.0),
normal_balance=trial.suggest_int("normal_balance", 1, 10, step=1),
)
class Experiment(BaseExperiment):
"""the docstring"""
def __init__(
self,
level,
stride,
n_components,
normal_balance,
*args,
**kwargs,
):
super().__init__()
# save params
self.window_size = 2**level
self.level = level
self.n_components = n_components
self.stride = stride
self.normal_balance = normal_balance
self._init_raw_mappers()
self._init_preprocessor()
def raw_transform(self, event, transient_only=True, no_nans=True):
# filter tags and set zeros to nans
tags = event["tags"][self.selected_tags].replace(0, np.nan)
labels = event["labels"]
event_type = event["event_type"]
if transient_only and MAEDataset.TRANSIENT_CLASS[event_type]:
transients = labels.values != event_type
tags = tags[transients]
labels = labels[transients]
features = self._feature_mapper(tags, event_type)
labels = self._label_mapper(labels, event_type)
# drop windows with NaN label
if no_nans:
notnan = labels.notna()
features = features[notnan]
labels = labels[notnan]
return features, labels, event_type
def metric_name(self):
return "accuracy"
def metric_rf(self):
return get_scorer("accuracy")
def metric_lgbm(self):
def acc(preds, train_data):
preds_ = np.argmax(np.reshape(preds, (self.num_classes, -1)), axis=0)
return "accuracy", accuracy_score(train_data.get_label(), preds_), True
return acc
def fit(self, X, y=None):
X = self._scaler.fit_transform(X)
X = self._imputer.fit_transform(X)
self._pca.fit(X)
def transform(self, X, y=None):
X = self._scaler.transform(X)
X = self._imputer.transform(X)
X = self._pca.transform(X)
return X, y
def _init_raw_mappers(self):
offset = 2**MAX_LEVEL - self.window_size
wavelet_mapper = TorchWaveletFeatureMapper(
level=self.level, stride=self.stride, offset=offset
)
stats_mapper = TorchStatisticalFeatureMapper(
window_size=2**self.level, stride=self.stride, offset=offset
)
self._feature_mapper = MixedMapper(stats_mapper, wavelet_mapper)
self._label_mapper = TorchMulticlassMRLStrategy(
window_size=self.window_size,
stride=self.stride,
offset=offset,
)
def _init_preprocessor(self):
# z-score
self._scaler = StandardScaler()
# remove nans
self._imputer = SimpleImputer(strategy="mean")
# pca
self._pca = PCA(n_components=self.n_components, whiten=True)
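
if __name__ == "__main__":
    # Minimal sketch (not part of the original experiment) of the preprocessing chain
    # used by fit()/transform() above: z-score scaling, mean imputation, then PCA.
    # The random data below is purely illustrative.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(64, 8))
    X[0, 0] = np.nan                                   # simulate a missing tag value
    X = StandardScaler().fit_transform(X)              # scalers ignore NaNs when fitting
    X = SimpleImputer(strategy="mean").fit_transform(X)
    X = PCA(n_components=0.95, whiten=True).fit_transform(X)
    print(X.shape)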
|
664e8affbeeed58889896fe433d49fe523a4033a
|
08ee04ae665dcb930ed4b98ca7b91b2dac2cc3b0
|
/src/rayoptics/raytr/trace.py
|
66146f40902dc2827f572b76b708840f24f04592
|
[
"BSD-3-Clause"
] |
permissive
|
mjhoptics/ray-optics
|
6bad622f7bb9b3485823b9cc511a6d2b679f7048
|
41ea6d618a93fe14f8bee45fb3efff6a6762bcce
|
refs/heads/master
| 2023-07-09T18:03:36.621685
| 2023-05-08T22:46:36
| 2023-05-08T22:46:36
| 109,168,474
| 195
| 49
|
BSD-3-Clause
| 2023-08-10T16:53:28
| 2017-11-01T18:34:12
|
Python
|
UTF-8
|
Python
| false
| false
| 25,789
|
py
|
trace.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 Michael J. Hayford
""" Supports model ray tracing in terms of relative aperture and field.
.. Created on Mon Sep 17 23:10:59 2018
.. codeauthor: Michael J. Hayford
"""
import itertools
import logging
import math
import numpy as np
from numpy.linalg import norm
from scipy.optimize import newton, fsolve
import pandas as pd
from . import raytrace as rt
from . import RayPkg, RaySeg
from .waveabr import (wave_abr_full_calc, calculate_reference_sphere,
transfer_to_exit_pupil)
from rayoptics.optical import model_constants as mc
from .traceerror import TraceError, TraceMissedSurfaceError, TraceTIRError
def ray_pkg(ray_pkg):
""" return a |Series| containing a ray package (RayPkg) """
return pd.Series(ray_pkg, index=['ray', 'op', 'wvl'])
def ray_df(ray):
""" return a |DataFrame| containing ray data """
r = pd.DataFrame(ray, columns=['inc_pt', 'after_dir',
'after_dst', 'normal'])
r.index.names = ['intrfc']
return r
def list_ray(ray_obj, tfrms=None, start=0):
""" pretty print a ray either in local or global coordinates """
if isinstance(ray_obj, tuple):
ray = ray_obj[0]
else:
ray = ray_obj
colHeader = " X Y Z L" \
" M N Len"
print(colHeader)
colFormats = "{:3d}: {:12.5f} {:12.5f} {:12.5g} {:12.6f} {:12.6f} " \
"{:12.6f} {:12.5g}"
for i, r in enumerate(ray[start:], start=start):
if tfrms is None:
print(colFormats.format(i,
r[mc.p][0], r[mc.p][1], r[mc.p][2],
r[mc.d][0], r[mc.d][1], r[mc.d][2],
r[mc.dst]))
else:
rot, trns = tfrms[i]
p = rot.dot(r[mc.p]) + trns
d = rot.dot(r[mc.d])
print(colFormats.format(i, p[0], p[1], p[2], d[0], d[1], d[2],
r[mc.dst]))
def trace_safe(opt_model, pupil, fld, wvl,
output_filter, rayerr_filter, **kwargs):
"""Wrapper for trace_base that handles exceptions.
Args:
opt_model: :class:`~.OpticalModel` instance
pupil: 2d vector of relative pupil coordinates
fld: :class:`~.Field` point for wave aberration calculation
wvl: wavelength of ray (nm)
output_filter:
- if None, append entire ray
- if 'last', append the last ray segment only
- else treat as callable and append the return value
rayerr_filter:
- if None, on ray error append nothing
- if 'summary', append the exception without ray data
- if 'full', append the exception with ray data up to error
- else append nothing
Returns:
ray_result: see discussion of filters, above.
"""
use_named_tuples = kwargs.get('use_named_tuples', False)
ray_result = None
try:
ray_pkg = trace_base(opt_model, pupil, fld, wvl,
**kwargs)
except TraceError as rayerr:
if rayerr_filter is None:
pass
elif rayerr_filter == 'full':
ray, op_delta, wvl = rayerr.ray_pkg
ray = [RaySeg(*rs) for rs in ray]
rayerr.ray_pkg = RayPkg(ray, op_delta, wvl)
ray_result = rayerr
elif rayerr_filter == 'summary':
rayerr.ray_pkg = None
ray_result = rayerr
else:
pass
else:
if use_named_tuples:
ray, op_delta, wvl = ray_pkg
ray = [RaySeg(*rs) for rs in ray]
ray_pkg = RayPkg(ray, op_delta, wvl)
if output_filter is None:
ray_result = ray_pkg
elif output_filter == 'last':
ray, op_delta, wvl = ray_pkg
final_seg_pkg = (ray[-1], op_delta, wvl)
ray_result = final_seg_pkg
else:
ray_result = output_filter(ray_pkg)
return ray_result
def retrieve_ray(ray_result):
""" Retrieve the ray (the list of ray segs) from ray_result.
This function handles the normal case where the ray traces successfully
and the case of a ray failure, which returns a TraceError instance.
"""
px, py, ray_item = ray_result
if isinstance(ray_item, TraceError):
return ray_item.ray_pkg
else:
return ray_item
def trace(seq_model, pt0, dir0, wvl, **kwargs):
""" returns (ray, ray_opl, wvl)
Args:
seq_model: the :class:`~.SequentialModel` to be traced
pt0: starting coordinate at object interface
dir0: starting direction cosines following object interface
wvl: ray trace wavelength in nm
**kwargs: keyword arguments
Returns:
(**ray**, **op_delta**, **wvl**)
- **ray** is a list for each interface in **path_pkg** of these
elements: [pt, after_dir, after_dst, normal]
- pt: the intersection point of the ray
- after_dir: the ray direction cosine following the interface
            - after_dst: the geometric distance to the next
interface
- normal: the surface normal at the intersection point
- **op_delta** - optical path wrt equally inclined chords to the
optical axis
- **wvl** - wavelength (in nm) that the ray was traced in
"""
return rt.trace(seq_model, pt0, dir0, wvl, **kwargs)
def trace_base(opt_model, pupil, fld, wvl, apply_vignetting=True, **kwargs):
"""Trace ray specified by relative aperture and field point.
Args:
opt_model: instance of :class:`~.OpticalModel` to trace
pupil: relative pupil coordinates of ray
fld: instance of :class:`~.Field`
wvl: ray trace wavelength in nm
**kwargs: keyword arguments
Returns:
(**ray**, **op_delta**, **wvl**)
- **ray** is a list for each interface in **path_pkg** of these
elements: [pt, after_dir, after_dst, normal]
- pt: the intersection point of the ray
- after_dir: the ray direction cosine following the interface
            - after_dst: the geometric distance to the next
interface
- normal: the surface normal at the intersection point
- **op_delta** - optical path wrt equally inclined chords to the
optical axis
- **wvl** - wavelength (in nm) that the ray was traced in
"""
vig_pupil = fld.apply_vignetting(pupil) if apply_vignetting else pupil
osp = opt_model.optical_spec
fod = opt_model['analysis_results']['parax_data'].fod
eprad = fod.enp_radius
aim_pt = np.array([0., 0.])
if hasattr(fld, 'aim_pt') and fld.aim_pt is not None:
aim_pt = fld.aim_pt
pt1 = np.array([eprad*vig_pupil[0]+aim_pt[0], eprad*vig_pupil[1]+aim_pt[1],
fod.obj_dist+fod.enp_dist])
pt0 = osp.obj_coords(fld)
dir0 = pt1 - pt0
length = norm(dir0)
dir0 = dir0/length
sm = opt_model.seq_model
# To handle virtual object distances, always propagate from
# the object in a positive Z direction.
if dir0[2] * sm.z_dir[0] < 0:
dir0 = -dir0
return rt.trace(sm, pt0, dir0, wvl, **kwargs)
def iterate_ray(opt_model, ifcx, xy_target, fld, wvl, **kwargs):
""" iterates a ray to xy_target on interface ifcx, returns aim points on
the paraxial entrance pupil plane
If idcx is None, i.e. a floating stop surface, returns xy_target.
If the iteration fails, a TraceError will be raised
"""
def y_stop_coordinate(y1, *args):
seq_model, ifcx, pt0, dist, wvl, y_target = args
pt1 = np.array([0., y1, dist])
dir0 = pt1 - pt0
length = norm(dir0)
dir0 = dir0/length
if dir0[2] * seq_model.z_dir[0] < 0:
dir0 = -dir0
try:
ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)
except TraceMissedSurfaceError as ray_miss:
ray = ray_miss.ray_pkg
if ray_miss.surf <= ifcx:
raise ray_miss
except TraceTIRError as ray_tir:
ray = ray_tir.ray_pkg
if ray_tir.surf < ifcx:
raise ray_tir
y_ray = ray[ifcx][mc.p][1]
# print(y1, y_ray)
return y_ray - y_target
def surface_coordinate(coord, *args):
seq_model, ifcx, pt0, dist, wvl, target = args
pt1 = np.array([coord[0], coord[1], dist])
dir0 = pt1 - pt0
length = norm(dir0)
dir0 = dir0/length
if dir0[2] * seq_model.z_dir[0] < 0:
dir0 = -dir0
ray, _, _ = rt.trace(seq_model, pt0, dir0, wvl)
xy_ray = np.array([ray[ifcx][mc.p][0], ray[ifcx][mc.p][1]])
# print(coord[0], coord[1], xy_ray[0], xy_ray[1])
return xy_ray - target
seq_model = opt_model.seq_model
osp = opt_model.optical_spec
fod = opt_model['analysis_results']['parax_data'].fod
dist = fod.obj_dist + fod.enp_dist
pt0 = osp.obj_coords(fld)
if ifcx is not None:
if pt0[0] == 0.0 and xy_target[0] == 0.0:
# do 1D iteration if field and target points are zero in x
y_target = xy_target[1]
logging.captureWarnings(True)
try:
start_y, results = newton(y_stop_coordinate, 0.,
args=(seq_model, ifcx, pt0,
dist, wvl, y_target),
disp=False, full_output=True)
except RuntimeError as rte:
# if we come here, start_y is a RuntimeResults object
# print(rte)
start_y = results.root
except TraceError:
start_y = 0.0
start_coords = np.array([0., start_y])
else:
# do 2D iteration. epsfcn is a parameter increment,
# make proportional to pupil radius
try:
start_coords = fsolve(surface_coordinate, np.array([0., 0.]),
epsfcn=0.0001*fod.enp_radius,
args=(seq_model, ifcx, pt0, dist,
wvl, xy_target))
except TraceError:
start_coords = np.array([0., 0.])
else: # floating stop surface - use entrance pupil for aiming
start_coords = np.array([0., 0.]) + xy_target
return start_coords
def trace_with_opd(opt_model, pupil, fld, wvl, foc, **kwargs):
""" returns (ray, ray_opl, wvl, opd) """
chief_ray_pkg = get_chief_ray_pkg(opt_model, fld, wvl, foc)
image_pt_2d = kwargs.get('image_pt', None)
image_delta = kwargs.get('image_delta', None)
ref_sphere = calculate_reference_sphere(opt_model, fld, wvl, foc,
chief_ray_pkg,
image_pt_2d=image_pt_2d,
image_delta=image_delta)
ray, op, wvl = trace_base(opt_model, pupil, fld, wvl, **kwargs)
# opl = rt.calc_optical_path(ray, opt_model.seq_model.path())
ray_pkg = ray, op, wvl
fld.chief_ray = chief_ray_pkg
fld.ref_sphere = ref_sphere
fod = opt_model['analysis_results']['parax_data'].fod
opd = wave_abr_full_calc(fod, fld, wvl, foc, ray_pkg,
chief_ray_pkg, ref_sphere)
ray, ray_op, wvl = ray_pkg
return ray, ray_op, wvl, opd
def trace_boundary_rays_at_field(opt_model, fld, wvl, use_named_tuples=False):
""" returns a list of RayPkgs for the boundary rays for field fld
"""
rim_rays = []
osp = opt_model.optical_spec
for p in osp.pupil.pupil_rays:
try:
ray, op, wvl = trace_base(opt_model, p, fld, wvl)
except TraceError as ray_error:
ray, op, wvl = ray_error.ray_pkg
if use_named_tuples:
ray = [RaySeg(*rs) for rs in ray]
rim_rays.append(RayPkg(ray, op, wvl))
return rim_rays
def boundary_ray_dict(opt_model, rim_rays):
pupil_rays = {}
for ray, lbl in zip(rim_rays, opt_model.optical_spec.pupil.ray_labels):
pupil_rays[lbl] = ray
return pupil_rays
def trace_boundary_rays(opt_model, **kwargs):
rayset = []
wvl = opt_model.seq_model.central_wavelength()
fov = opt_model.optical_spec.field_of_view
for fi, fld in enumerate(fov.fields):
rim_rays = trace_boundary_rays_at_field(opt_model, fld, wvl, **kwargs)
fld.pupil_rays = boundary_ray_dict(opt_model, rim_rays)
rayset.append(rim_rays)
return rayset
def trace_ray_list_at_field(opt_model, ray_list, fld, wvl, foc):
""" returns a list of ray |DataFrame| for the ray_list at field fld """
rayset = []
for p in ray_list:
ray, op, wvl = trace_base(opt_model, p, fld, wvl)
rayset.append(ray)
rdf_list = [ray_df(r) for r in rayset]
return rdf_list
def trace_field(opt_model, fld, wvl, foc):
""" returns a |DataFrame| with the boundary rays for field fld """
osp = opt_model.optical_spec
pupil_rays = osp.pupil.pupil_rays
rdf_list = trace_ray_list_at_field(opt_model, pupil_rays, fld, wvl, foc)
rset = pd.concat(rdf_list, keys=osp.pupil.ray_labels,
names=['pupil'])
return rset
def trace_all_fields(opt_model):
""" returns a |DataFrame| with the boundary rays for all fields """
osp = opt_model.optical_spec
fld, wvl, foc = osp.lookup_fld_wvl_focus(0)
fset = []
for f in osp.field_of_view.fields:
rset = trace_field(opt_model, f, wvl, foc)
fset.append(rset)
fdf = pd.concat(fset, keys=osp.field_of_view.index_labels,
names=['field'])
return fdf
def trace_chief_ray(opt_model, fld, wvl, foc):
"""Trace a chief ray for fld and wvl, returning the ray_pkg and exit pupil segment."""
fod = opt_model['analysis_results']['parax_data'].fod
ray, op, wvl = trace_base(opt_model, [0., 0.], fld, wvl)
# op = rt.calc_optical_path(ray, opt_model.seq_model.path())
cr = RayPkg(ray, op, wvl)
# cr_exp_pt: E upper bar prime: pupil center for pencils from Q
# cr_exp_pt, cr_b4_dir, cr_exp_dist
cr_exp_seg = transfer_to_exit_pupil(opt_model.seq_model.ifcs[-2],
(cr.ray[-2][mc.p],
cr.ray[-2][mc.d]), fod.exp_dist)
return cr, cr_exp_seg
def trace_fan(opt_model, fan_rng, fld, wvl, foc, img_filter=None,
**kwargs):
start = np.array(fan_rng[0])
stop = fan_rng[1]
num = fan_rng[2]
step = (stop - start)/(num - 1)
fan = []
for r in range(num):
pupil = np.array(start)
ray, op, wvl = trace_base(opt_model, pupil, fld, wvl, **kwargs)
# opl = rt.calc_optical_path(ray, opt_model.seq_model.path())
ray_pkg = ray, op, wvl
if img_filter:
result = img_filter(pupil, ray_pkg)
fan.append([pupil, result])
else:
fan.append([pupil, ray_pkg])
start += step
return fan
def trace_grid(opt_model, grid_rng, fld, wvl, foc, img_filter=None,
form='grid', append_if_none=True, **kwargs):
output_filter = kwargs.get('output_filter', None)
rayerr_filter = kwargs.get('rayerr_filter', None)
start = np.array(grid_rng[0])
stop = grid_rng[1]
num = grid_rng[2]
step = np.array((stop - start)/(num - 1))
grid = []
for i in range(num):
if form == 'list':
working_grid = grid
elif form == 'grid':
grid_row = []
working_grid = grid_row
for j in range(num):
pupil = np.array(start)
ray_result = trace_safe(opt_model, pupil, fld, wvl,
output_filter, rayerr_filter,
check_apertures=True, **kwargs)
if ray_result is not None:
if img_filter:
result = img_filter(pupil, ray_result)
working_grid.append(result)
else:
working_grid.append([pupil[0], pupil[1], ray_result])
else: # ray outside pupil or failed
if img_filter:
result = img_filter(pupil, None)
if result is not None or append_if_none:
working_grid.append(result)
else:
if append_if_none:
working_grid.append([pupil[0], pupil[1], None])
start[1] += step[1]
if form == 'grid':
grid.append(grid_row)
start[0] += step[0]
start[1] = grid_rng[0][1]
return np.array(grid)
def setup_pupil_coords(opt_model, fld, wvl, foc,
image_pt=None, image_delta=None):
chief_ray_pkg = get_chief_ray_pkg(opt_model, fld, wvl, foc)
image_pt_2d = None if image_pt is None else image_pt[:2]
ref_sphere = calculate_reference_sphere(opt_model, fld, wvl, foc,
chief_ray_pkg,
image_pt_2d=image_pt_2d,
image_delta=image_delta)
return ref_sphere, chief_ray_pkg
def aim_chief_ray(opt_model, fld, wvl=None):
""" aim chief ray at center of stop surface and save results on **fld** """
seq_model = opt_model.seq_model
if wvl is None:
wvl = seq_model.central_wavelength()
stop = seq_model.stop_surface
aim_pt = iterate_ray(opt_model, stop, np.array([0., 0.]), fld, wvl)
return aim_pt
def apply_paraxial_vignetting(opt_model):
fov = opt_model.optical_spec.field_of_view
pm = opt_model.parax_model
max_field, jth = fov.max_field()
for j, fld in enumerate(fov.fields):
rel_fov = math.sqrt(fld.x**2 + fld.y**2)
if not fov.is_relative and max_field != 0:
rel_fov = rel_fov/max_field
min_vly, min_vuy = pm.paraxial_vignetting(rel_fov)
if min_vly[1] is not None:
fld.vly = 1 - min_vly[0]
if min_vuy[1] is not None:
fld.vuy = 1 - min_vuy[0]
# print("Field {:2d}: {:8.3f}, ly:{:8.3f} uy:{:8.3f}".format(
# j, rel_fov, fld.vly, fld.vuy))
def get_chief_ray_pkg(opt_model, fld, wvl, foc):
"""Get the chief ray package at **fld**, computing it if necessary.
Args:
opt_model: :class:`~.OpticalModel` instance
fld: :class:`~.Field` point for wave aberration calculation
wvl: wavelength of ray (nm)
foc: defocus amount
Returns:
chief_ray_pkg: tuple of chief_ray, cr_exp_seg
- chief_ray: chief_ray, chief_ray_op, wvl
- cr_exp_seg: chief ray exit pupil segment (pt, dir, dist)
- pt: chief ray intersection with exit pupil plane
- dir: direction cosine of the chief ray in exit pupil space
- dist: distance from interface to the exit pupil point
"""
if fld.chief_ray is None:
aim_chief_ray(opt_model, fld, wvl=wvl)
chief_ray_pkg = trace_chief_ray(opt_model, fld, wvl, foc)
elif fld.chief_ray[0][2] != wvl:
chief_ray_pkg = trace_chief_ray(opt_model, fld, wvl, foc)
else:
chief_ray_pkg = fld.chief_ray
return chief_ray_pkg
def refocus(opt_model):
""" Compute a focus shift bringing the axial marginal ray to zero. """
osp = opt_model['optical_spec']
fld = osp['fov'].fields[0] # assumed to be the axial field
wvl = osp['wvls'].central_wvl
df_ray, ray_op, wvl = trace_safe(opt_model, [0., 1.], fld, wvl,
output_filter=None, rayerr_filter='full',
use_named_tuples=True)
defocus = -df_ray[-1].p[1]/(df_ray[-2].d[1]/df_ray[-2].d[2])
return defocus
def trace_astigmatism_coddington_fan(opt_model, fld, wvl, foc):
""" calculate astigmatism by Coddington trace at **fld** """
cr = RayPkg(*trace_base(opt_model, [0., 0.], fld, wvl))
s_dfoc, t_dfoc = trace_coddington_fan(opt_model, cr, foc=foc)
return s_dfoc, t_dfoc
def trace_coddington_fan(opt_model, ray_pkg, foc=None):
""" astigmatism calculation via Coddington trace
.. note:: spherical surfaces only
"""
seq_model = opt_model.seq_model
wl = seq_model.index_for_wavelength(ray_pkg.wvl)
path = itertools.zip_longest(ray_pkg.ray, seq_model.ifcs,
[n[wl] for n in seq_model.rndx],
seq_model.lcl_tfrms,
seq_model.z_dir)
before_rind = seq_model.rndx[0][wl]
before_dir = None
s_before, t_before = None, None
for r, ifc, after_rind, tfrm, z_dir in path:
after_rind = after_rind if after_rind is not None else before_rind
pt, after_dir, after_dst, normal = r
if before_dir is not None:
normal_len = norm(normal)
cosI_prime = np.dot(after_dir, normal)/normal_len
sinI_prime = math.sqrt(1.0 - cosI_prime**2)
sinI = after_rind*sinI_prime/before_rind
cosI = math.sqrt(1.0 - sinI**2)
obl_power = ifc.optical_power
if obl_power != 0.0:
obl_power *= ((after_rind*cosI_prime - before_rind*cosI) /
(after_rind - before_rind))
# print("pwr, obl_pwr, after_dst:",
# ifc.optical_power/(after_rind - before_rind),
# obl_power, after_dst)
# else:
# print("pwr, obl_pwr, after_dst:",
# ifc.optical_power, obl_power, after_dst)
n_by_s_prime = before_rind/s_before + obl_power
s_prime = after_rind/n_by_s_prime
# print("s, s':", s_before, s_prime)
s_before = s_prime - after_dst
n_cosIp2_by_t_prime = before_rind*cosI**2/t_before + obl_power
t_prime = after_rind*cosI_prime**2/n_cosIp2_by_t_prime
# print("t, t':", t_before, t_prime)
t_before = t_prime - after_dst
else:
s_before = -after_dst
t_before = -after_dst
before_rind = after_rind
before_dir = after_dir
s_dfoc = s_prime*after_dir[2] + pt[2]
t_dfoc = t_prime*after_dir[2] + pt[2]
if foc is not None:
focus_shift = foc
s_dfoc -= focus_shift
t_dfoc -= focus_shift
# print("delta s, t:", s_dfoc, t_dfoc)
return s_dfoc, t_dfoc
def intersect_2_lines(P1, V1, P2, V2):
""" intersect 2 non-parallel lines, returning distance from P1
s = ((P2 - P1) x V1).(V1 x V2)/\|(V1 x V2)\|**2
`Weisstein, Eric W. "Line-Line Intersection." From MathWorld--A Wolfram Web
Resource. <http://mathworld.wolfram.com/Line-LineIntersection.html>`_
"""
Vx = np.cross(V1, V2)
s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)
return s
def trace_astigmatism_curve(opt_model, num_points=21, **kwargs):
""" Trace a fan of fields and collect astigmatism data for each field.
Args:
opt_model: the optical model
num_points: the number of FOV sampling points
kwargs: keyword args for ray trace
Returns:
tuple: field point, sagittal and tangential focus shifts
"""
from rayoptics.raytr.opticalspec import Field
s_data = []
t_data = []
field_data = []
osp = opt_model.optical_spec
_, wvl, foc = osp.lookup_fld_wvl_focus(0)
fld = Field()
max_field = osp['fov'].max_field()[0]
for f in np.linspace(0., max_field, num=num_points):
fld.y = f
ref_sphere, cr_pkg = setup_pupil_coords(opt_model, fld, wvl, foc)
fld.chief_ray = cr_pkg
fld.ref_sphere = ref_sphere
s_foc, t_foc = trace_astigmatism(opt_model, fld, wvl, foc, **kwargs)
s_data.append(s_foc)
t_data.append(t_foc)
field_data.append(f)
return field_data, s_data, t_data
def trace_astigmatism(opt_model, fld, wvl, foc, dx=0.001, dy=0.001):
""" calculate astigmatism by tracing close rays about the chief ray at **fld**
This function implicitly assumes that the **fld** point is on a plane of
symmetry, i.e. the system is rotationally symmetric, bilaterally symmetric,
or quad symmetric. No check is done to ensure this.
Args:
opt_model: the optical model
fld: a Field object
wvl: wavelength in nm
foc: defocus amount
dx: delta in pupil coordinates for x/sagittal direction
dy: delta in pupil coordinates for y/tangential direction
Returns:
tuple: sagittal and tangential focus shifts at **fld**
"""
rlist = []
rlist.append(RayPkg(*trace_base(opt_model, [0., 0.], fld, wvl)))
rlist.append(RayPkg(*trace_base(opt_model, [dx, 0.], fld, wvl)))
rlist.append(RayPkg(*trace_base(opt_model, [0., dy], fld, wvl)))
rlist.append(RayPkg(*trace_base(opt_model, [-dx, 0.], fld, wvl)))
rlist.append(RayPkg(*trace_base(opt_model, [0., -dy], fld, wvl)))
s = intersect_2_lines(rlist[1].ray[-1][mc.p], rlist[1].ray[-1][mc.d],
rlist[3].ray[-1][mc.p], rlist[3].ray[-1][mc.d])
s_foc = s * rlist[1].ray[-1][mc.d][2]
t = intersect_2_lines(rlist[2].ray[-1][mc.p], rlist[2].ray[-1][mc.d],
rlist[4].ray[-1][mc.p], rlist[4].ray[-1][mc.d])
t_foc = t * rlist[2].ray[-1][mc.d][2]
if foc is not None:
focus_shift = foc
s_foc -= focus_shift
t_foc -= focus_shift
return s_foc, t_foc
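
if __name__ == "__main__":
    # Minimal self-check (not part of the original module) for intersect_2_lines():
    # the line through (0,0,0) along +x meets the line through (1,-1,0) along +y
    # at (1,0,0), i.e. at distance s=1 from P1.
    P1, V1 = np.array([0., 0., 0.]), np.array([1., 0., 0.])
    P2, V2 = np.array([1., -1., 0.]), np.array([0., 1., 0.])
    s = intersect_2_lines(P1, V1, P2, V2)
    print(s, P1 + s * V1)  # -> 1.0 [1. 0. 0.]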
|
157f09aa5b19bb16d0f31b20011419f6bba7dc14
|
f0456cd042d3cbde062016b5ab423040d1728429
|
/tests/version_req_test.py
|
2cdcaa7f55d4051e6bea09fb5ded077fc3ae4056
|
[
"MIT"
] |
permissive
|
Madoshakalaka/pipenv-setup
|
59b4608bd29ab44d49e2e957af24dde3b3c483d2
|
109056e7b33b2ae4242eb56326c4f9bc66dde841
|
refs/heads/master
| 2023-03-10T20:55:01.611461
| 2022-07-06T18:48:21
| 2022-07-06T18:48:21
| 203,203,922
| 117
| 32
|
MIT
| 2023-03-03T23:05:07
| 2019-08-19T15:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
version_req_test.py
|
# noinspection PyProtectedMember
import pytest
from pipenv_setup.constants import VersionConflict as VC
from pipenv_setup.inconsistency_checker import _VersionReqs
@pytest.mark.parametrize(
("setup_version", "pipfile_version", "expected"),
[
["==1.0.2", "==1.0.2", None],
[">=1.0.2", "==1.0.2", VC.POTENTIAL],
["==1.0.2", ">=1.0.2", VC.COMPATIBLE],
[">1.0", ">=1.0.2", VC.POTENTIAL],
[">1.0", ">0.9.2", VC.COMPATIBLE],
[">1.0", "~=0.9.2", VC.DISJOINT],
[">1.0", "~=1.9.2", VC.POTENTIAL],
["==1.0", "~=1.9.2", VC.DISJOINT],
["==2.0", "~=1.9.2", VC.DISJOINT],
["==1.9", "~=1.8.2", VC.DISJOINT],
["==1.9.4", "~=1.8.2", VC.DISJOINT],
["==1.8.5", "~=1.8.2", VC.COMPATIBLE],
["==1.8", "~=1.8.2", VC.DISJOINT],
["==1.0", "~=1.9.2", VC.DISJOINT],
["==1.2", "~=1.0.2", VC.DISJOINT],
["==1.2", "~=1.0", VC.COMPATIBLE],
["==1.2", "*", VC.COMPATIBLE],
[">=1.2, !=1.3, <2.0", "*", VC.COMPATIBLE],
[">=1.2, !=1.3, <2.0", "~=1.1", VC.COMPATIBLE],
[">=1.2, !=1.3, <2.0", "~=1.3", VC.POTENTIAL],
[">=1.2, !=1.3, <2.0", "~=1.3, != 1.9", VC.POTENTIAL],
[">=1.2, <2", "~=1.2", None],
[">=1.2, <1.5", "~=1.2", VC.COMPATIBLE],
["", "~=1.2", VC.POTENTIAL],
["", "*", None],
],
)
def test_check_compatibility(
setup_version, pipfile_version, expected
): # type: (str, str, bool) -> None
vr = _VersionReqs(setup_version)
assert vr.analyze_compatibility(pipfile_version) == expected
|
53c9a36ad0defc9fe1fc5f781ed055550e357388
|
9940f6579e010bb7c1fa13885c49bbaf6164723b
|
/lbry/torrent/session.py
|
713d820392ba2623ee88e6282be382b3c94f2cd9
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lbryio/lbry-sdk
|
feaf1143b178b496a9d81c99faf51fac60e6fed1
|
eb5da9511e162ef1080cb34af2ee087383cfa94a
|
refs/heads/master
| 2023-08-18T13:06:16.106204
| 2023-02-07T18:50:25
| 2023-04-03T17:34:36
| 41,103,286
| 5,272
| 291
|
MIT
| 2023-06-28T16:36:20
| 2015-08-20T15:24:10
|
Python
|
UTF-8
|
Python
| false
| false
| 9,104
|
py
|
session.py
|
import asyncio
import binascii
import os
import logging
import random
from hashlib import sha1
from tempfile import mkdtemp
from typing import Optional
import libtorrent
log = logging.getLogger(__name__)
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
libtorrent.add_torrent_params_flags_t.flag_auto_managed
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe
)
class TorrentHandle:
def __init__(self, loop, executor, handle):
self._loop = loop
self._executor = executor
self._handle: libtorrent.torrent_handle = handle
self.started = asyncio.Event(loop=loop)
self.finished = asyncio.Event(loop=loop)
self.metadata_completed = asyncio.Event(loop=loop)
self.size = 0
self.total_wanted_done = 0
self.name = ''
self.tasks = []
self.torrent_file: Optional[libtorrent.file_storage] = None
self._base_path = None
self._handle.set_sequential_download(1)
@property
def largest_file(self) -> Optional[str]:
if not self.torrent_file:
return None
index = self.largest_file_index
return os.path.join(self._base_path, self.torrent_file.at(index).path)
@property
def largest_file_index(self):
largest_size, index = 0, 0
for file_num in range(self.torrent_file.num_files()):
if self.torrent_file.file_size(file_num) > largest_size:
largest_size = self.torrent_file.file_size(file_num)
index = file_num
return index
def stop_tasks(self):
while self.tasks:
self.tasks.pop().cancel()
def _show_status(self):
# fixme: cleanup
if not self._handle.is_valid():
return
status = self._handle.status()
if status.has_metadata:
self.size = status.total_wanted
self.total_wanted_done = status.total_wanted_done
self.name = status.name
if not self.metadata_completed.is_set():
self.metadata_completed.set()
log.info("Metadata completed for btih:%s - %s", status.info_hash, self.name)
self.torrent_file = self._handle.get_torrent_info().files()
self._base_path = status.save_path
first_piece = self.torrent_file.at(self.largest_file_index).offset
if not self.started.is_set():
if self._handle.have_piece(first_piece):
self.started.set()
else:
# prioritize it
self._handle.set_piece_deadline(first_piece, 100)
if not status.is_seeding:
log.debug('%.2f%% complete (down: %.1f kB/s up: %.1f kB/s peers: %d seeds: %d) %s - %s',
status.progress * 100, status.download_rate / 1000, status.upload_rate / 1000,
status.num_peers, status.num_seeds, status.state, status.save_path)
elif not self.finished.is_set():
self.finished.set()
log.info("Torrent finished: %s", self.name)
async def status_loop(self):
while True:
self._show_status()
if self.finished.is_set():
break
await asyncio.sleep(0.1)
async def pause(self):
await self._loop.run_in_executor(
self._executor, self._handle.pause
)
async def resume(self):
await self._loop.run_in_executor(
self._executor, lambda: self._handle.resume() # pylint: disable=unnecessary-lambda
)
class TorrentSession:
def __init__(self, loop, executor):
self._loop = loop
self._executor = executor
self._session: Optional[libtorrent.session] = None
self._handles = {}
self.tasks = []
self.wait_start = True
async def add_fake_torrent(self):
tmpdir = mkdtemp()
info, btih = _create_fake_torrent(tmpdir)
flags = libtorrent.add_torrent_params_flags_t.flag_seed_mode
handle = self._session.add_torrent({
'ti': info, 'save_path': tmpdir, 'flags': flags
})
self._handles[btih] = TorrentHandle(self._loop, self._executor, handle)
return btih
async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
settings = {
'listen_interfaces': f"{interface}:{port}",
'enable_natpmp': False,
'enable_upnp': False
}
self._session = await self._loop.run_in_executor(
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
)
self.tasks.append(self._loop.create_task(self.process_alerts()))
def stop(self):
while self.tasks:
self.tasks.pop().cancel()
self._session.save_state()
self._session.pause()
self._session.stop_dht()
self._session.stop_lsd()
self._session.stop_natpmp()
self._session.stop_upnp()
self._session = None
def _pop_alerts(self):
for alert in self._session.pop_alerts():
log.info("torrent alert: %s", alert)
async def process_alerts(self):
while True:
await self._loop.run_in_executor(
self._executor, self._pop_alerts
)
await asyncio.sleep(1)
async def pause(self):
await self._loop.run_in_executor(
self._executor, lambda: self._session.save_state() # pylint: disable=unnecessary-lambda
)
await self._loop.run_in_executor(
self._executor, lambda: self._session.pause() # pylint: disable=unnecessary-lambda
)
async def resume(self):
await self._loop.run_in_executor(
self._executor, self._session.resume
)
def _add_torrent(self, btih: str, download_directory: Optional[str]):
params = {'info_hash': binascii.unhexlify(btih.encode()), 'flags': DEFAULT_FLAGS}
if download_directory:
params['save_path'] = download_directory
handle = self._session.add_torrent(params)
handle.force_dht_announce()
self._handles[btih] = TorrentHandle(self._loop, self._executor, handle)
def full_path(self, btih):
return self._handles[btih].largest_file
async def add_torrent(self, btih, download_path):
await self._loop.run_in_executor(
self._executor, self._add_torrent, btih, download_path
)
self._handles[btih].tasks.append(self._loop.create_task(self._handles[btih].status_loop()))
await self._handles[btih].metadata_completed.wait()
if self.wait_start:
# fixme: temporary until we add streaming support, otherwise playback fails!
await self._handles[btih].started.wait()
def remove_torrent(self, btih, remove_files=False):
if btih in self._handles:
handle = self._handles[btih]
handle.stop_tasks()
self._session.remove_torrent(handle._handle, 1 if remove_files else 0)
self._handles.pop(btih)
async def save_file(self, btih, download_directory):
handle = self._handles[btih]
await handle.resume()
def get_size(self, btih):
return self._handles[btih].size
def get_name(self, btih):
return self._handles[btih].name
def get_downloaded(self, btih):
return self._handles[btih].total_wanted_done
def is_completed(self, btih):
return self._handles[btih].finished.is_set()
def get_magnet_uri(btih):
return f"magnet:?xt=urn:btih:{btih}"
def _create_fake_torrent(tmpdir):
# beware, that's just for testing
path = os.path.join(tmpdir, 'tmp')
with open(path, 'wb') as myfile:
size = myfile.write(bytes([random.randint(0, 255) for _ in range(40)]) * 1024)
file_storage = libtorrent.file_storage()
file_storage.add_file('tmp', size)
t = libtorrent.create_torrent(file_storage, 0, 4 * 1024 * 1024)
libtorrent.set_piece_hashes(t, tmpdir)
info = libtorrent.torrent_info(t.generate())
btih = sha1(info.metadata()).hexdigest()
return info, btih
async def main():
if os.path.exists("~/Downloads/ubuntu-18.04.3-live-server-amd64.torrent"):
os.remove("~/Downloads/ubuntu-18.04.3-live-server-amd64.torrent")
if os.path.exists("~/Downloads/ubuntu-18.04.3-live-server-amd64.iso"):
os.remove("~/Downloads/ubuntu-18.04.3-live-server-amd64.iso")
btih = "dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c"
executor = None
session = TorrentSession(asyncio.get_event_loop(), executor)
session2 = TorrentSession(asyncio.get_event_loop(), executor)
await session.bind('localhost', port=4040)
await session2.bind('localhost', port=4041)
btih = await session.add_fake_torrent()
session2._session.add_dht_node(('localhost', 4040))
await session2.add_torrent(btih, "/tmp/down")
    try:
        # keep both sessions alive until interrupted (Ctrl-C)
        while True:
            await asyncio.sleep(100)
    finally:
        # previously unreachable after the infinite loop: run cleanup on cancellation
        await session.pause()
        if executor is not None:
            executor.shutdown()
if __name__ == "__main__":
asyncio.run(main())
|
5217f3170f2c51110d16459d577868857d70d54c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/L1TGlobal/test/testL1TGlobalProducer_cfg.py
|
ff2245d6513e15b04579eddd6c9ec3e7b799db65
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 5,889
|
py
|
testL1TGlobalProducer_cfg.py
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as vpo
opts = vpo.VarParsing('standard')
opts.setDefault('maxEvents', 1000)
opts.register('resetPSCountersEachLumiSec', False,
vpo.VarParsing.multiplicity.singleton,
vpo.VarParsing.varType.bool,
'reset prescale counters at the start of every luminosity section')
opts.register('semiRandomInitialPSCounters', False,
vpo.VarParsing.multiplicity.singleton,
vpo.VarParsing.varType.bool,
'use semi-random initialisation of prescale counters')
opts.register('prescaleSet', 2,
vpo.VarParsing.multiplicity.singleton,
vpo.VarParsing.varType.int,
'index of prescale column (starts from zero)')
opts.parseArguments()
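# Illustrative usage sketch (not part of the original config): the VarParsing options
# registered above are passed on the cmsRun command line, e.g.
#   cmsRun testL1TGlobalProducer_cfg.py maxEvents=100 prescaleSet=0 resetPSCountersEachLumiSec=True
# The exact invocation depends on the local CMSSW setup.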
process = cms.Process('TEST')
process.options.numberOfThreads = 1
process.options.numberOfStreams = 0
process.options.wantSummary = False
process.maxEvents.input = opts.maxEvents
# Global Tag
from Configuration.AlCa.GlobalTag import GlobalTag
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2022_realistic', '')
# Input source
process.source = cms.Source('PoolSource',
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_12_6_0_pre2/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/125X_mcRun3_2022_realistic_v3-v1/2580000/2d96539c-b321-401f-b7b2-51884a5d421f.root',
)
)
# EventSetup modules
process.GlobalParametersRcdSource = cms.ESSource('EmptyESSource',
recordName = cms.string('L1TGlobalParametersRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.GlobalParameters = cms.ESProducer('StableParametersTrivialProducer',
# trigger decision
NumberPhysTriggers = cms.uint32(512), # number of physics trigger algorithms
# trigger objects
NumberL1Muon = cms.uint32(8), # muons
NumberL1EGamma = cms.uint32(12), # e/gamma and isolated e/gamma objects
NumberL1Jet = cms.uint32(12), # jets
NumberL1Tau = cms.uint32(12), # taus
# hardware
NumberChips = cms.uint32(1), # number of maximum chips defined in the xml file
PinsOnChip = cms.uint32(512), # number of pins on the GTL condition chips
# correspondence 'condition chip - GTL algorithm word' in the hardware
# e.g.: chip 2: 0 - 95; chip 1: 96 - 128 (191)
OrderOfChip = cms.vint32(1),
)
process.L1TUtmTriggerMenuRcdSource = cms.ESSource('EmptyESSource',
recordName = cms.string('L1TUtmTriggerMenuRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.L1TriggerMenu = cms.ESProducer('L1TUtmTriggerMenuESProducer',
L1TriggerMenuFile = cms.string('test/L1Menu_L1TGlobalUnitTests_v1_0_0.xml'),
)
process.L1TGlobalPrescalesVetosFractRcdSource = cms.ESSource('EmptyESSource',
recordName = cms.string('L1TGlobalPrescalesVetosFractRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.L1TGlobalPrescalesVetosFract = cms.ESProducer('L1TGlobalPrescalesVetosFractESProducer',
TriggerMenuLuminosity = cms.string('startup'),
Verbosity = cms.int32(0),
AlgoBxMaskDefault = cms.int32(1),
PrescaleXMLFile = cms.string('test/UGT_BASE_RS_PRESCALES_L1MenuL1TGlobalUnitTests_v1_0_0.xml'),
AlgoBxMaskXMLFile = cms.string('test/UGT_BASE_RS_ALGOBX_MASK_L1MenuL1TGlobalUnitTests_v1_0_0.xml'),
FinOrMaskXMLFile = cms.string('test/UGT_BASE_RS_FINOR_MASK_L1MenuL1TGlobalUnitTests_v1_0_0.xml'),
VetoMaskXMLFile = cms.string('test/UGT_BASE_RS_VETO_MASK_L1MenuL1TGlobalUnitTests_v1_0_0.xml'),
)
# EventData modules
process.simGtExtFakeStage2Digis = cms.EDProducer('L1TExtCondProducer',
bxFirst = cms.int32(-2),
bxLast = cms.int32(2),
setBptxAND = cms.bool(True),
setBptxMinus = cms.bool(True),
setBptxOR = cms.bool(True),
setBptxPlus = cms.bool(True),
tcdsRecordLabel = cms.InputTag('')
)
process.simGtStage2Digis = cms.EDProducer('L1TGlobalProducer',
AlgoBlkInputTag = cms.InputTag(''),
AlgorithmTriggersUnmasked = cms.bool(False),
AlgorithmTriggersUnprescaled = cms.bool(False),
EGammaInputTag = cms.InputTag('simCaloStage2Digis'),
EtSumInputTag = cms.InputTag('simCaloStage2Digis'),
ExtInputTag = cms.InputTag('simGtExtFakeStage2Digis'),
GetPrescaleColumnFromData = cms.bool(False),
JetInputTag = cms.InputTag('simCaloStage2Digis'),
MuonInputTag = cms.InputTag('simGmtStage2Digis'),
MuonShowerInputTag = cms.InputTag('simGmtShowerDigis'),
TauInputTag = cms.InputTag('simCaloStage2Digis'),
useMuonShowers = cms.bool(True),
RequireMenuToMatchAlgoBlkInput = cms.bool(False),
resetPSCountersEachLumiSec = cms.bool(opts.resetPSCountersEachLumiSec),
semiRandomInitialPSCounters = cms.bool(opts.semiRandomInitialPSCounters),
PrescaleSet = cms.uint32(opts.prescaleSet)
)
# Task definition
process.l1tTask = cms.Task( process.simGtExtFakeStage2Digis, process.simGtStage2Digis )
# Path definition
process.l1tPath = cms.Path( process.l1tTask )
# Analyser of L1T-menu results
process.l1tGlobalSummary = cms.EDAnalyzer( 'L1TGlobalSummary',
AlgInputTag = cms.InputTag( 'simGtStage2Digis' ),
ExtInputTag = cms.InputTag( 'simGtStage2Digis' ),
MinBx = cms.int32( 0 ),
MaxBx = cms.int32( 0 ),
DumpTrigResults = cms.bool( False ),
DumpRecord = cms.bool( False ),
DumpTrigSummary = cms.bool( True ),
ReadPrescalesFromFile = cms.bool( False ),
psFileName = cms.string( '' ),
psColumn = cms.int32( 0 )
)
# EndPath definition
process.l1tEndPath = cms.EndPath( process.l1tGlobalSummary )
# MessageLogger
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100 # only report every 100th event start
process.MessageLogger.L1TGlobalSummary = cms.untracked.PSet()
|
3c6d618366c050833762816c9400736c3790b197
|
05b0c763ab92086e69a8d00ae6465009c596f6bc
|
/tests/cpu/test_compile.py
|
2cb37d16ba66f1ae3294969b20989a382f56cbc5
|
[
"Apache-2.0"
] |
permissive
|
intel/intel-extension-for-pytorch
|
60ce2af2ec3a1dacae0d0db13dd51a5b44512e61
|
7f9266789de7ca9d8bcf55606f3204f1a3640640
|
refs/heads/master
| 2023-09-01T09:13:16.866410
| 2023-08-31T08:00:37
| 2023-08-31T08:00:37
| 256,061,008
| 991
| 144
|
Apache-2.0
| 2023-08-13T13:56:07
| 2020-04-15T23:35:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
test_compile.py
|
import unittest
import itertools
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
class Conv_Bn_Relu(nn.Module):
def __init__(self):
super(Conv_Bn_Relu, self).__init__()
self.conv = nn.Conv2d(6, 3, 3)
self.bn = nn.BatchNorm2d(3, eps=0.001)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
class TestCompile(TestCase):
def test_inference(self):
model_ = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
for dtype, ipex_optimize in itertools.product(
[torch.float32, torch.bfloat16], [True, False]
):
model = copy.deepcopy(model_)
if ipex_optimize:
model = ipex.optimize(model, dtype=dtype)
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
y1 = model(x)
fx_model = torch.fx.symbolic_trace(model)
compiled_model = ipex.compile(fx_model, [x])
# warm up
for _ in range(2):
compiled_model(x)
y2 = compiled_model(x)
self.assertEqual(y1, y2)
self.assertTrue(y2.dtype == dtype)
if __name__ == "__main__":
test = unittest.main()
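# Illustrative note (not part of the original test): with the unittest entry point
# above, the file can typically be run directly, e.g.
#   python tests/cpu/test_compile.py
# or through pytest; the exact path depends on the checkout layout.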
|
d1b1b74a4c54657786995c6e9c8b9140b32e6931
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/mmdetection/configs/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py
|
c41dfce8bc67e7f4d18434a2c10a33c66da403c1
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
retinanet_r50-syncbn_fpn_1350k_objects365v1.py
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/objects365v2_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)),
bbox_head=dict(num_classes=365))
# training schedule for 1350K
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=1350000, # 36 epochs
val_interval=150000)
# Using 8 GPUS while training
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
clip_grad=dict(max_norm=35, norm_type=2))
# learning rate policy
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 1000,
by_epoch=False,
begin=0,
end=10000),
dict(
type='MultiStepLR',
begin=0,
end=1350000,
by_epoch=False,
milestones=[900000, 1200000],
gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=150000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
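# Illustrative usage sketch (assumes the standard MMDetection tooling; not part of
# this config): launch an 8-GPU run matching the base_batch_size above, e.g.
#   bash tools/dist_train.sh configs/objects365/retinanet_r50-syncbn_fpn_1350k_objects365v1.py 8
# with the config path given relative to the MMDetection root.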
|
ddce7998dfa47736db14a58e0e439875812b5af8
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/examples/styling/plots/grid_band_hatch.py
|
e718b2c36c902cfb026dd1a5f4b8dd4dbe352d44
|
[
"BSD-3-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 482
|
py
|
grid_band_hatch.py
|
from bokeh.plotting import figure, show
p = figure(height=250, width=600, x_range=(0, 10), tools="", toolbar_location=None)
p.line(x=[0,1,2,3,4,5,6,7,8,9,10],
y=[1,3,4,3,1,2,6,5,2,3,4])
p.ygrid.grid_line_color = None
ticks = [0, 2, 4, 6, 8, 10]
p.xaxis[0].ticker = ticks
p.xgrid[0].ticker = ticks
p.xgrid.band_hatch_pattern = "/"
p.xgrid.band_hatch_alpha = 0.6
p.xgrid.band_hatch_color = "lightgrey"
p.xgrid.band_hatch_weight = 0.5
p.xgrid.band_hatch_scale = 10
show(p)
|
5250b7ee01dfd990f3efc0de3a3c234ed56f0d7a
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_runtime/vart/trace/vaitrace/vaitraceCppRunner.py
|
888bebc7f197decc072e0bd394099ec0e304433e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,238
|
py
|
vaitraceCppRunner.py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Copyright 2022-2023 Advanced Micro Devices Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import signal
import logging
from subprocess import Popen, PIPE
import collector
import tracer
import vaitraceCfgManager
import vaitraceSetting
force_exit = False
def handler(signum, frame):
logging.info("Killing process...")
logging.info("Processing trace data, please wait...")
global force_exit
if force_exit:
logging.error("Force exit...")
exit(-1)
force_exit = True
def shell_find_exec_path(_exe):
exe_abs_path = ""
if os.path.exists(os.path.abspath(_exe)):
"""1. search in cur dir"""
exe_abs_path = os.path.abspath(_exe)
else:
"""2. search in path via 'which'"""
which_cmd = ["which", _exe]
p = Popen(which_cmd, stdout=PIPE, stderr=PIPE)
res = p.stdout.readlines()
if len(res) > 0:
exe_abs_path = res[0].strip().decode()
    if not os.path.exists(exe_abs_path):
        raise RuntimeError("Executable file does not exist [%s]" % _exe)
logging.info("Executable file: %s" % exe_abs_path)
return exe_abs_path
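# Illustrative example (hypothetical executable name): shell_find_exec_path("my_app")
# returns the absolute path of "./my_app" if it exists in the current directory,
# otherwise whatever `which my_app` reports on this system.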
def run(globalOptions: dict):
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
options = globalOptions
"""Help find the path of cmd in PATH"""
cmd = options.get('control').get('cmd')[0]
options['control']['cmd'][0] = shell_find_exec_path(cmd)
if options.get('cmdline_args').get('bypass', False):
cmd = options.get('control').get('cmd')
logging.info("Bypass vaitrace, just run cmd")
proc = Popen(cmd)
proc.wait()
exit(0)
"""Preparing"""
tracer.prepare(options)
tracer.start()
"""requirememt format: ["tracerName", "tracerName1", "hwInfo", ...]"""
collector.prepare(options, tracer.getSourceRequirement())
collector.start()
"""Start Running"""
cmd = options.get('control').get('cmd')
timeout = options.get('control').get('timeout')
proc = Popen(cmd)
options['control']['launcher'] = "cpp"
options['control']['pid'] = proc.pid
options['control']['time'] = time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime())
if timeout <= 0:
proc.wait()
else:
while timeout > 0:
time.sleep(1)
timeout -= 1
p = proc.poll()
if p is not None:
break
        if timeout == 0:
logging.info("vaitrace time out, stopping process...")
proc.send_signal(signal.SIGINT)
proc.wait()
collector.stop()
tracer.stop()
tracer.process(collector.getData())
|
999cca9c688bc76485e0fa8c4f116a1b713204f6
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/Introducing_CircuitPlaygroundExpress/CircuitPlaygroundExpress_SoundMeter/code.py
|
f61354bac485dcd9fd61e0cf0f75c6496abad3e2
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 4,445
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2017 Dan Halbert for Adafruit Industries
# SPDX-FileCopyrightText: 2017 Tony DiCola for Adafruit Industries
# SPDX-FileCopyrightText: 2017 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# The MIT License (MIT)
#
# Copyright (c) 2017 Dan Halbert for Adafruit Industries
# Copyright (c) 2017 Kattni Rembor, Tony DiCola for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Circuit Playground Sound Meter
import array
import math
import audiobusio
import board
import neopixel
# Color of the peak pixel.
PEAK_COLOR = (100, 0, 255)
# Number of total pixels - 10 built into the Circuit Playground
NUM_PIXELS = 10
# Exponential scaling factor.
# Should probably be in range -10 .. 10 to be reasonable.
CURVE = 2
SCALE_EXPONENT = math.pow(10, CURVE * -0.1)
# Number of samples to read at once.
NUM_SAMPLES = 160
# Restrict value to be between floor and ceiling.
def constrain(value, floor, ceiling):
return max(floor, min(value, ceiling))
# Scale input_value between output_min and output_max, exponentially.
def log_scale(input_value, input_min, input_max, output_min, output_max):
normalized_input_value = (input_value - input_min) / \
(input_max - input_min)
return output_min + \
math.pow(normalized_input_value, SCALE_EXPONENT) \
* (output_max - output_min)
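# Worked example (illustrative, using the defaults above): CURVE = 2 gives
# SCALE_EXPONENT = 10 ** -0.2, roughly 0.63, so log_scale(300, 50, 550, 0, 10)
# is about ((300 - 50) / 500) ** 0.63 * 10, i.e. roughly 6.5; quieter readings
# light more pixels than a purely linear mapping would.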
# Remove DC bias before computing RMS.
def normalized_rms(values):
minbuf = int(mean(values))
samples_sum = sum(
float(sample - minbuf) * (sample - minbuf)
for sample in values
)
return math.sqrt(samples_sum / len(values))
def mean(values):
return sum(values) / len(values)
def volume_color(volume):
return 200, volume * (255 // NUM_PIXELS), 0
# Main program
# Set up NeoPixels and turn them all off.
pixels = neopixel.NeoPixel(board.NEOPIXEL, NUM_PIXELS, brightness=0.1, auto_write=False)
pixels.fill(0)
pixels.show()
mic = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA,
sample_rate=16000, bit_depth=16)
# Record an initial sample to calibrate. Assume it's quiet when we start.
samples = array.array('H', [0] * NUM_SAMPLES)
mic.record(samples, len(samples))
# Set lowest level to expect, plus a little.
input_floor = normalized_rms(samples) + 10
# OR: use a fixed floor instead
# input_floor = 50
# You might want to print the input_floor to help adjust other values.
# print(input_floor)
# Corresponds to sensitivity: lower means more pixels light up with lower sound
# Adjust this as you see fit.
input_ceiling = input_floor + 500
peak = 0
while True:
mic.record(samples, len(samples))
magnitude = normalized_rms(samples)
# You might want to print this to see the values.
# print(magnitude)
# Compute scaled logarithmic reading in the range 0 to NUM_PIXELS
c = log_scale(constrain(magnitude, input_floor, input_ceiling),
input_floor, input_ceiling, 0, NUM_PIXELS)
# Light up pixels that are below the scaled and interpolated magnitude.
pixels.fill(0)
for i in range(NUM_PIXELS):
if i < c:
pixels[i] = volume_color(i)
# Light up the peak pixel and animate it slowly dropping.
if c >= peak:
peak = min(c, NUM_PIXELS - 1)
elif peak > 0:
peak = peak - 1
if peak > 0:
pixels[int(peak)] = PEAK_COLOR
pixels.show()
|
fea27083f922b715fed2ffaa9083b91a7b34712d
|
b728c792b5171f6be6ad91919b4a76a6f198b3e9
|
/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
|
d99a3813ca95436d77b82a5ca9dab24a7985d09c
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSL-1.0"
] |
permissive
|
bundy-dns/bundy
|
c8beeca2c051924590794c92a3a58d1980a86024
|
3d41934996b82b0cd2fe22dd74d2abc1daba835d
|
refs/heads/master
| 2021-09-28T16:24:39.037808
| 2021-09-22T06:04:17
| 2021-09-22T06:04:17
| 19,160,469
| 110
| 33
|
NOASSERTION
| 2021-09-22T06:04:18
| 2014-04-25T20:54:37
|
C++
|
UTF-8
|
Python
| false
| false
| 98
|
spec
|
rdata_minfo_toWireUncompressed1.spec
|
#
# The simplest form of MINFO: all default parameters
#
[custom]
sections: minfo
[minfo]
rdlen: -1
|
57712ac50c6297ec877fa1bfd029a51ab1b781b8
|
7f59e2c4e771c19378e9839406c220d3985e7efe
|
/python-toolbox/tests/communication/test_remote_calls.py
|
27f19cfc06f54062088f07048fd4ff3cfe6fbb81
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-marvin
|
c6ff32d50eb01ccd84266587d79f562a9e371496
|
58fdccf2e677041a13966ddbdd96d484edf3b474
|
refs/heads/develop
| 2023-08-30T12:46:56.973102
| 2022-11-18T15:27:52
| 2022-11-18T15:27:52
| 148,087,939
| 112
| 77
|
Apache-2.0
| 2023-03-07T05:45:59
| 2018-09-10T02:27:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,371
|
py
|
test_remote_calls.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright [2020] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import mock
except ImportError:
import unittest.mock as mock
from marvin_python_toolbox.communication.remote_calls import RemoteCalls
mocked_host = '0.0.0.0'
mocked_port = 0
mocked_name = 'GRPC'
mocked_params = {
'mocked': 'params'
}
mocked_args = 'mocked_args'
@mock.patch('marvin_python_toolbox.communication.remote_calls.grpc.insecure_channel')
def test_RemoteCall_init(channel_mocked):
RemoteCalls(mocked_host, mocked_port)
channel_mocked.assert_called_with('0.0.0.0:0')
@mock.patch('marvin_python_toolbox.communication.remote_calls.daemon_pb2_grpc.CommandCallStub')
def test_call_command(stub_mocked):
stub = stub_mocked.return_value
rc = RemoteCalls()
rc.call_command(mocked_name, mocked_params)
stub.callCommand.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.daemon_pb2_grpc.CommandCallStub')
def test_stop_command(stub_mocked):
stub = stub_mocked.return_value
rc = RemoteCalls()
rc.stop_command(mocked_name)
stub.stopCommand.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_grpc(call_mocked):
rc = RemoteCalls()
rc.run_grpc('all', None, None)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.stop_command')
def test_stop_grpc(stop_mocked):
rc = RemoteCalls()
rc.stop_grpc()
stop_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_dryrun(call_mocked):
rc = RemoteCalls()
rc.run_dryrun('all', True)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_notebook(call_mocked):
rc = RemoteCalls()
rc.run_notebook(True)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_lab(call_mocked):
rc = RemoteCalls()
rc.run_lab(mocked_port)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_test(call_mocked):
rc = RemoteCalls()
rc.run_test(True, True, True, mocked_args)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_tox(call_mocked):
rc = RemoteCalls()
rc.run_tox(mocked_args)
call_mocked.assert_called()
@mock.patch('marvin_python_toolbox.communication.remote_calls.RemoteCalls.call_command')
def test_run_tdd(call_mocked):
rc = RemoteCalls()
rc.run_tdd(True, True, True, True, mocked_args)
call_mocked.assert_called()
|
dabcc1d7af4a1b255a90aac81501233f070480fe
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/devicefarm/_inputs.py
|
f5ef3904c7de885bb19b1d45014a51fc4611fc0f
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 4,961
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DevicePoolRuleArgs',
'TestGridProjectVpcConfigArgs',
]
@pulumi.input_type
class DevicePoolRuleArgs:
def __init__(__self__, *,
attribute: Optional[pulumi.Input[str]] = None,
operator: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] attribute: The rule's stringified attribute. Valid values are: `APPIUM_VERSION`, `ARN`, `AVAILABILITY`, `FLEET_TYPE`, `FORM_FACTOR`, `INSTANCE_ARN`, `INSTANCE_LABELS`, `MANUFACTURER`, `MODEL`, `OS_VERSION`, `PLATFORM`, `REMOTE_ACCESS_ENABLED`, `REMOTE_DEBUG_ENABLED`.
        :param pulumi.Input[str] operator: Specifies how Device Farm compares the rule's attribute to the value; the supported operators depend on the attribute. Valid values are: `EQUALS`, `NOT_IN`, `IN`, `GREATER_THAN`, `GREATER_THAN_OR_EQUALS`, `LESS_THAN`, `LESS_THAN_OR_EQUALS`, `CONTAINS`.
:param pulumi.Input[str] value: The rule's value.
"""
if attribute is not None:
pulumi.set(__self__, "attribute", attribute)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def attribute(self) -> Optional[pulumi.Input[str]]:
"""
The rule's stringified attribute. Valid values are: `APPIUM_VERSION`, `ARN`, `AVAILABILITY`, `FLEET_TYPE`, `FORM_FACTOR`, `INSTANCE_ARN`, `INSTANCE_LABELS`, `MANUFACTURER`, `MODEL`, `OS_VERSION`, `PLATFORM`, `REMOTE_ACCESS_ENABLED`, `REMOTE_DEBUG_ENABLED`.
"""
return pulumi.get(self, "attribute")
@attribute.setter
def attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute", value)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies how Device Farm compares the rule's attribute to the value; the supported operators depend on the attribute. Valid values are: `EQUALS`, `NOT_IN`, `IN`, `GREATER_THAN`, `GREATER_THAN_OR_EQUALS`, `LESS_THAN`, `LESS_THAN_OR_EQUALS`, `CONTAINS`.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The rule's value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class TestGridProjectVpcConfigArgs:
def __init__(__self__, *,
security_group_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
subnet_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
vpc_id: pulumi.Input[str]):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: A list of VPC security group IDs in your Amazon VPC.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: A list of VPC subnet IDs in your Amazon VPC.
:param pulumi.Input[str] vpc_id: The ID of the Amazon VPC.
"""
pulumi.set(__self__, "security_group_ids", security_group_ids)
pulumi.set(__self__, "subnet_ids", subnet_ids)
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of VPC security group IDs in your Amazon VPC.
"""
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of VPC subnet IDs in your Amazon VPC.
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The ID of the Amazon VPC.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
|
81f0c1f80e85874f8060adb0e6638bb6986d08e5
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/angle/scripts/registry_xml.py
|
618862e46dd4f794fc77102d35a91c041aa44dd5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 23,064
|
py
|
registry_xml.py
|
#!/usr/bin/python3
#
# Copyright 2018 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# registry_xml.py:
#   Parses information from Khronos registry files.
# List of supported extensions. Add to this list to enable new extensions
# available in gl.xml.
import difflib
import os
import sys
import xml.etree.ElementTree as etree
from enum import Enum
khronos_xml_inputs = [
'../third_party/EGL-Registry/src/api/egl.xml',
'../third_party/OpenCL-Docs/src/xml/cl.xml',
'../third_party/OpenGL-Registry/src/xml/gl.xml',
'../third_party/OpenGL-Registry/src/xml/glx.xml',
'../third_party/OpenGL-Registry/src/xml/wgl.xml',
]
angle_xml_inputs = [
'gl_angle_ext.xml',
'egl_angle_ext.xml',
'registry_xml.py',
]
xml_inputs = sorted(khronos_xml_inputs + angle_xml_inputs)
# Notes on categories of extensions:
# 'Requestable' extensions are extensions that can be enabled with ANGLE_request_extension
# 'ES-Only' extensions are always implicitly enabled.
# 'Toggleable' extensions are like 'Requestable' except they can also be disabled.
# 'ANGLE' extensions are extensions that are not yet officially upstreamed to Khronos.
# We document those extensions in gl_angle_ext.xml instead of the canonical gl.xml.
angle_toggleable_extensions = [
"GL_ANGLE_texture_rectangle",
]
angle_requestable_extensions = [
"GL_ANGLE_base_vertex_base_instance",
"GL_ANGLE_base_vertex_base_instance_shader_builtin",
"GL_ANGLE_clip_cull_distance",
"GL_ANGLE_compressed_texture_etc",
"GL_ANGLE_copy_texture_3d",
"GL_ANGLE_framebuffer_multisample",
"GL_ANGLE_get_image",
"GL_ANGLE_get_tex_level_parameter",
"GL_ANGLE_logic_op",
"GL_ANGLE_lossy_etc_decode",
"GL_ANGLE_memory_object_flags",
"GL_ANGLE_memory_object_fuchsia",
"GL_ANGLE_memory_size",
"GL_ANGLE_multi_draw",
"GL_ANGLE_multiview_multisample",
"GL_ANGLE_polygon_mode",
"GL_ANGLE_provoking_vertex",
"GL_ANGLE_read_only_depth_stencil_feedback_loops",
"GL_ANGLE_renderability_validation",
"GL_ANGLE_robust_fragment_shader_output",
"GL_ANGLE_semaphore_fuchsia",
"GL_ANGLE_shader_pixel_local_storage",
"GL_ANGLE_shader_pixel_local_storage_coherent",
"GL_ANGLE_stencil_texturing",
"GL_ANGLE_texture_compression_dxt3",
"GL_ANGLE_texture_compression_dxt5",
"GL_ANGLE_texture_external_update",
"GL_ANGLE_texture_multisample",
"GL_ANGLE_vulkan_image",
"GL_ANGLE_yuv_internal_format",
"GL_CHROMIUM_color_buffer_float_rgb",
"GL_CHROMIUM_color_buffer_float_rgba",
"GL_CHROMIUM_lose_context",
"GL_CHROMIUM_sync_query",
"GL_CHROMIUM_texture_filtering_hint",
]
gles_requestable_extensions = [
"GL_ANGLE_framebuffer_blit",
"GL_ANGLE_instanced_arrays",
"GL_ANGLE_pack_reverse_row_order",
"GL_ANGLE_texture_usage",
"GL_APPLE_clip_distance",
"GL_ARB_sync",
"GL_ARM_shader_framebuffer_fetch",
"GL_EXT_base_instance",
"GL_EXT_blend_func_extended",
"GL_EXT_blend_minmax",
"GL_EXT_buffer_storage",
"GL_EXT_clip_control",
"GL_EXT_clip_cull_distance",
"GL_EXT_color_buffer_float",
"GL_EXT_color_buffer_half_float",
"GL_EXT_compressed_ETC1_RGB8_sub_texture",
"GL_EXT_conservative_depth",
"GL_EXT_copy_image",
"GL_EXT_depth_clamp",
"GL_EXT_disjoint_timer_query",
"GL_EXT_draw_buffers",
"GL_EXT_draw_buffers_indexed",
"GL_EXT_draw_elements_base_vertex",
"GL_EXT_EGL_image_array",
"GL_EXT_EGL_image_external_wrap_modes",
"GL_EXT_EGL_image_storage",
"GL_EXT_external_buffer",
"GL_EXT_float_blend",
"GL_EXT_frag_depth",
"GL_EXT_geometry_shader",
"GL_EXT_gpu_shader5",
"GL_EXT_instanced_arrays",
"GL_EXT_map_buffer_range",
"GL_EXT_memory_object",
"GL_EXT_memory_object_fd",
"GL_EXT_multi_draw_indirect",
"GL_EXT_multisampled_render_to_texture",
"GL_EXT_multisampled_render_to_texture2",
"GL_EXT_occlusion_query_boolean",
"GL_EXT_polygon_offset_clamp",
"GL_EXT_protected_textures",
"GL_EXT_pvrtc_sRGB",
"GL_EXT_read_format_bgra",
"GL_EXT_render_snorm",
"GL_EXT_semaphore",
"GL_EXT_semaphore_fd",
"GL_EXT_separate_shader_objects",
"GL_EXT_shader_framebuffer_fetch",
"GL_EXT_shader_framebuffer_fetch_non_coherent",
"GL_EXT_shader_io_blocks",
"GL_EXT_shader_non_constant_global_initializers",
"GL_EXT_shader_texture_lod",
"GL_EXT_shadow_samplers",
"GL_EXT_sRGB",
"GL_EXT_tessellation_shader",
"GL_EXT_texture_border_clamp",
"GL_EXT_texture_buffer",
"GL_EXT_texture_compression_bptc",
"GL_EXT_texture_compression_dxt1",
"GL_EXT_texture_compression_rgtc",
"GL_EXT_texture_compression_s3tc",
"GL_EXT_texture_compression_s3tc_srgb",
"GL_EXT_texture_cube_map_array",
"GL_EXT_texture_filter_anisotropic",
"GL_EXT_texture_filter_minmax",
"GL_EXT_texture_format_BGRA8888",
"GL_EXT_texture_mirror_clamp_to_edge",
"GL_EXT_texture_norm16",
"GL_EXT_texture_rg",
"GL_EXT_texture_sRGB_R8",
"GL_EXT_texture_sRGB_RG8",
"GL_EXT_texture_storage",
"GL_EXT_texture_type_2_10_10_10_REV",
"GL_EXT_unpack_subimage",
"GL_EXT_YUV_target",
"GL_IMG_texture_compression_pvrtc",
"GL_IMG_texture_compression_pvrtc2",
"GL_KHR_parallel_shader_compile",
"GL_KHR_texture_compression_astc_hdr",
"GL_KHR_texture_compression_astc_ldr",
"GL_KHR_texture_compression_astc_sliced_3d",
"GL_MESA_framebuffer_flip_y",
"GL_NV_depth_buffer_float2",
"GL_NV_EGL_stream_consumer_external",
"GL_NV_framebuffer_blit",
"GL_NV_pack_subimage",
"GL_NV_pixel_buffer_object",
"GL_NV_polygon_mode",
"GL_NV_read_depth",
"GL_NV_read_depth_stencil",
"GL_NV_read_stencil",
"GL_NV_shader_noperspective_interpolation",
"GL_OES_compressed_EAC_R11_signed_texture",
"GL_OES_compressed_EAC_R11_unsigned_texture",
"GL_OES_compressed_EAC_RG11_signed_texture",
"GL_OES_compressed_EAC_RG11_unsigned_texture",
"GL_OES_compressed_ETC1_RGB8_texture",
"GL_OES_compressed_ETC2_punchthroughA_RGBA8_texture",
"GL_OES_compressed_ETC2_punchthroughA_sRGB8_alpha_texture",
"GL_OES_compressed_ETC2_RGB8_texture",
"GL_OES_compressed_ETC2_RGBA8_texture",
"GL_OES_compressed_ETC2_sRGB8_alpha8_texture",
"GL_OES_compressed_ETC2_sRGB8_texture",
"GL_OES_compressed_paletted_texture",
"GL_OES_copy_image",
"GL_OES_depth_texture_cube_map",
"GL_OES_draw_buffers_indexed",
"GL_OES_draw_elements_base_vertex",
"GL_OES_EGL_image",
"GL_OES_EGL_image_external",
"GL_OES_EGL_image_external_essl3",
"GL_OES_element_index_uint",
"GL_OES_fbo_render_mipmap",
"GL_OES_geometry_shader",
"GL_OES_get_program_binary",
"GL_OES_mapbuffer",
"GL_OES_rgb8_rgba8",
"GL_OES_sample_shading",
"GL_OES_sample_variables",
"GL_OES_shader_image_atomic",
"GL_OES_shader_io_blocks",
"GL_OES_shader_multisample_interpolation",
"GL_OES_standard_derivatives",
"GL_OES_texture_3D",
"GL_OES_texture_border_clamp",
"GL_OES_texture_buffer",
"GL_OES_texture_compression_astc",
"GL_OES_texture_cube_map_array",
"GL_OES_texture_float",
"GL_OES_texture_float_linear",
"GL_OES_texture_half_float",
"GL_OES_texture_half_float_linear",
"GL_OES_texture_npot",
"GL_OES_texture_stencil8",
"GL_OES_texture_storage_multisample_2d_array",
"GL_OES_vertex_array_object",
"GL_OES_vertex_half_float",
"GL_OES_vertex_type_10_10_10_2",
"GL_OVR_multiview",
"GL_OVR_multiview2",
"GL_QCOM_render_shared_exponent",
"GL_QCOM_shading_rate",
"GL_WEBGL_video_texture",
]
angle_es_only_extensions = [
"GL_ANGLE_client_arrays",
"GL_ANGLE_get_serialized_context_string",
"GL_ANGLE_program_binary",
"GL_ANGLE_program_cache_control",
"GL_ANGLE_relaxed_vertex_attribute_type",
"GL_ANGLE_request_extension",
"GL_ANGLE_rgbx_internal_format",
"GL_ANGLE_robust_client_memory",
"GL_ANGLE_robust_resource_initialization",
"GL_ANGLE_shader_binary",
"GL_ANGLE_webgl_compatibility",
"GL_CHROMIUM_bind_generates_resource",
"GL_CHROMIUM_bind_uniform_location",
"GL_CHROMIUM_copy_compressed_texture",
"GL_CHROMIUM_copy_texture",
"GL_CHROMIUM_framebuffer_mixed_samples",
]
gles_es_only_extensions = [
"GL_AMD_performance_monitor",
"GL_ANDROID_extension_pack_es31a",
"GL_ANGLE_depth_texture",
"GL_ANGLE_translated_shader_source",
"GL_EXT_debug_label",
"GL_EXT_debug_marker",
"GL_EXT_discard_framebuffer",
"GL_EXT_multisample_compatibility",
"GL_EXT_primitive_bounding_box",
"GL_EXT_robustness",
"GL_EXT_sRGB_write_control",
"GL_EXT_texture_format_sRGB_override",
"GL_EXT_texture_sRGB_decode",
"GL_KHR_blend_equation_advanced",
"GL_KHR_debug",
"GL_KHR_no_error",
"GL_KHR_robust_buffer_access_behavior",
"GL_NV_fence",
"GL_NV_robustness_video_memory_purge",
"GL_OES_depth24",
"GL_OES_depth32",
"GL_OES_depth_texture",
"GL_OES_EGL_sync",
"GL_OES_packed_depth_stencil",
"GL_OES_primitive_bounding_box",
"GL_OES_surfaceless_context",
]
# ES1 (Possibly the min set of extensions needed by Android)
gles1_extensions = [
"GL_OES_draw_texture",
"GL_OES_framebuffer_object",
"GL_OES_matrix_palette",
"GL_OES_point_size_array",
"GL_OES_point_sprite",
"GL_OES_query_matrix",
"GL_OES_texture_cube_map",
]
def check_sorted(name, l):
unidiff = difflib.unified_diff(l, sorted(l, key=str.casefold), 'unsorted', 'sorted')
diff_lines = list(unidiff)
assert not diff_lines, '\n\nPlease sort "%s":\n%s' % (name, '\n'.join(diff_lines))
angle_extensions = angle_requestable_extensions + angle_es_only_extensions + angle_toggleable_extensions
gles_extensions = gles_requestable_extensions + gles_es_only_extensions
supported_extensions = sorted(angle_extensions + gles1_extensions + gles_extensions)
assert len(supported_extensions) == len(set(supported_extensions)), 'Duplicates in extension list'
check_sorted('angle_requestable_extensions', angle_requestable_extensions)
check_sorted('angle_es_only_extensions', angle_es_only_extensions)
check_sorted('angle_toggleable_extensions', angle_toggleable_extensions)
check_sorted('gles_requestable_extensions', gles_requestable_extensions)
check_sorted('gles_es_only_extensions', gles_es_only_extensions)
check_sorted('gles1_extensions', gles1_extensions)
supported_egl_extensions = [
"EGL_ANDROID_blob_cache",
"EGL_ANDROID_create_native_client_buffer",
"EGL_ANDROID_framebuffer_target",
"EGL_ANDROID_get_frame_timestamps",
"EGL_ANDROID_get_native_client_buffer",
"EGL_ANDROID_native_fence_sync",
"EGL_ANDROID_presentation_time",
"EGL_ANGLE_create_surface_swap_interval",
"EGL_ANGLE_d3d_share_handle_client_buffer",
"EGL_ANGLE_device_creation",
"EGL_ANGLE_device_d3d",
"EGL_ANGLE_display_semaphore_share_group",
"EGL_ANGLE_display_texture_share_group",
"EGL_ANGLE_feature_control",
"EGL_ANGLE_ggp_stream_descriptor",
"EGL_ANGLE_metal_create_context_ownership_identity",
"EGL_ANGLE_metal_shared_event_sync",
"EGL_ANGLE_power_preference",
"EGL_ANGLE_prepare_swap_buffers",
"EGL_ANGLE_program_cache_control",
"EGL_ANGLE_query_surface_pointer",
"EGL_ANGLE_stream_producer_d3d_texture",
"EGL_ANGLE_surface_d3d_texture_2d_share_handle",
"EGL_ANGLE_swap_with_frame_token",
"EGL_ANGLE_sync_control_rate",
"EGL_ANGLE_vulkan_image",
"EGL_ANGLE_wait_until_work_scheduled",
"EGL_ANGLE_window_fixed_size",
"EGL_CHROMIUM_sync_control",
"EGL_EXT_create_context_robustness",
"EGL_EXT_device_query",
"EGL_EXT_gl_colorspace_display_p3",
"EGL_EXT_gl_colorspace_display_p3_linear",
"EGL_EXT_gl_colorspace_display_p3_passthrough",
"EGL_EXT_gl_colorspace_scrgb",
"EGL_EXT_gl_colorspace_scrgb_linear",
"EGL_EXT_image_dma_buf_import",
"EGL_EXT_image_dma_buf_import_modifiers",
"EGL_EXT_image_gl_colorspace",
"EGL_EXT_pixel_format_float",
"EGL_EXT_platform_base",
"EGL_EXT_platform_device",
"EGL_EXT_protected_content",
"EGL_IMG_context_priority",
"EGL_KHR_debug",
"EGL_KHR_fence_sync",
"EGL_KHR_gl_colorspace",
"EGL_KHR_image",
"EGL_KHR_lock_surface3",
"EGL_KHR_mutable_render_buffer",
"EGL_KHR_no_config_context",
"EGL_KHR_partial_update",
"EGL_KHR_reusable_sync",
"EGL_KHR_stream",
"EGL_KHR_stream_consumer_gltexture",
"EGL_KHR_surfaceless_context",
"EGL_KHR_swap_buffers_with_damage",
"EGL_KHR_wait_sync",
"EGL_NV_post_sub_buffer",
"EGL_NV_stream_consumer_gltexture_yuv",
]
check_sorted('supported_egl_extensions', supported_egl_extensions)
supported_cl_extensions = [
# Since OpenCL 1.1
"cl_khr_byte_addressable_store",
"cl_khr_global_int32_base_atomics",
"cl_khr_global_int32_extended_atomics",
"cl_khr_local_int32_base_atomics",
"cl_khr_local_int32_extended_atomics",
# OpenCL 2.0 - 2.2
"cl_khr_3d_image_writes",
"cl_khr_depth_images",
"cl_khr_image2d_from_buffer",
# Optional
"cl_khr_extended_versioning",
"cl_khr_fp64",
"cl_khr_icd",
"cl_khr_int64_base_atomics",
"cl_khr_int64_extended_atomics",
]
# Strip these suffixes from Context entry point names. NV is excluded (for now).
strip_suffixes = ["AMD", "ANDROID", "ANGLE", "CHROMIUM", "EXT", "KHR", "OES", "OVR"]
check_sorted('strip_suffixes', strip_suffixes)
# The EGL_ANGLE_explicit_context extension is generated differently from other extensions.
# Toggle generation here.
support_EGL_ANGLE_explicit_context = True
# Group names that appear in command/param, but not present in groups/group
unsupported_enum_group_names = {
'GetMultisamplePNameNV',
'BufferPNameARB',
'BufferPointerNameARB',
'VertexAttribPointerPropertyARB',
'VertexAttribPropertyARB',
'FenceParameterNameNV',
'FenceConditionNV',
'BufferPointerNameARB',
'MatrixIndexPointerTypeARB',
'PointParameterNameARB',
'ClampColorTargetARB',
'ClampColorModeARB',
}
# Versions (major, minor). Note that GLES intentionally places 1.0 last.
DESKTOP_GL_VERSIONS = [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (2, 0), (2, 1), (3, 0),
(3, 1), (3, 2), (3, 3), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5),
(4, 6)]
GLES_VERSIONS = [(2, 0), (3, 0), (3, 1), (3, 2), (1, 0)]
EGL_VERSIONS = [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5)]
WGL_VERSIONS = [(1, 0)]
GLX_VERSIONS = [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4)]
CL_VERSIONS = [(1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2), (3, 0)]
# API types
class apis:
GL = 'GL'
GLES = 'GLES'
WGL = 'WGL'
GLX = 'GLX'
EGL = 'EGL'
CL = 'CL'
# For GLenum types
api_enums = {apis.GL: 'BigGLEnum', apis.GLES: 'GLESEnum'}
default_enum_group_name = 'AllEnums'
def script_relative(path):
return os.path.join(os.path.dirname(sys.argv[0]), path)
def path_to(folder, file):
return os.path.join(script_relative(".."), "src", folder, file)
def strip_api_prefix(cmd_name):
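    # Note: str.lstrip removes *characters* from the set "cwegl", not a literal
    # prefix; that is sufficient here because entry points start with gl/egl/wgl/cl
    # and continue with an uppercase letter (e.g. "glDrawArrays" -> "DrawArrays").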
return cmd_name.lstrip("cwegl")
def find_xml_input(xml_file):
for found_xml in xml_inputs:
if found_xml == xml_file or found_xml.endswith('/' + xml_file):
return found_xml
raise Exception('Could not find XML input: ' + xml_file)
def get_cmd_name(command_node):
proto = command_node.find('proto')
cmd_name = proto.find('name').text
return cmd_name
class CommandNames:
def __init__(self):
self.command_names = {}
def get_commands(self, version):
return self.command_names[version]
def get_all_commands(self):
cmd_names = []
# Combine all the version lists into a single list
for version, version_cmd_names in sorted(self.command_names.items()):
cmd_names += version_cmd_names
return cmd_names
def add_commands(self, version, commands):
# Add key if it doesn't exist
if version not in self.command_names:
self.command_names[version] = []
# Add the commands that aren't duplicates
self.command_names[version] += commands
class RegistryXML:
def __init__(self, xml_file, ext_file=None):
tree = etree.parse(script_relative(find_xml_input(xml_file)))
self.root = tree.getroot()
if (ext_file):
self._AppendANGLEExts(find_xml_input(ext_file))
self.all_commands = self.root.findall('commands/command')
self.all_cmd_names = CommandNames()
self.commands = {}
def _AppendANGLEExts(self, ext_file):
angle_ext_tree = etree.parse(script_relative(ext_file))
angle_ext_root = angle_ext_tree.getroot()
insertion_point = self.root.findall("./commands")[0]
for command in angle_ext_root.iter('commands'):
insertion_point.extend(command)
insertion_point = self.root.findall("./extensions")[0]
for extension in angle_ext_root.iter('extensions'):
insertion_point.extend(extension)
insertion_point = self.root
for enums in angle_ext_root.iter('enums'):
insertion_point.append(enums)
def AddCommands(self, feature_name, annotation):
xpath = ".//feature[@name='%s']//command" % feature_name
commands = [cmd.attrib['name'] for cmd in self.root.findall(xpath)]
# Remove commands that have already been processed
current_cmds = self.all_cmd_names.get_all_commands()
commands = [cmd for cmd in commands if cmd not in current_cmds]
self.all_cmd_names.add_commands(annotation, commands)
self.commands[annotation] = commands
def _ClassifySupport(self, extension):
supported = extension.attrib['supported']
# Desktop GL extensions exposed in ANGLE GLES for Chrome.
if extension.attrib['name'] in ['GL_ARB_sync', 'GL_NV_robustness_video_memory_purge']:
supported += "|gles2"
if 'gles2' in supported:
return 'gl2ext'
elif 'gles1' in supported:
return 'glext'
elif 'egl' in supported:
return 'eglext'
elif 'wgl' in supported:
return 'wglext'
elif 'glx' in supported:
return 'glxext'
elif 'cl' in supported:
return 'clext'
else:
assert False, 'Cannot classify support for %s: %s' % (extension.attrib['name'],
supported)
return 'unknown'
def AddExtensionCommands(self, supported_extensions, apis):
# Use a first step to run through the extensions so we can generate them
# in sorted order.
self.ext_data = {}
self.ext_dupes = {}
ext_annotations = {}
for extension in self.root.findall("extensions/extension"):
extension_name = extension.attrib['name']
if not extension_name in supported_extensions:
continue
ext_annotations[extension_name] = self._ClassifySupport(extension)
ext_cmd_names = []
# There's an extra step here to filter out 'api=gl' extensions. This
# is necessary for handling KHR extensions, which have separate entry
# point signatures (without the suffix) for desktop GL. Note that this
# extra step is necessary because of Etree's limited Xpath support.
for require in extension.findall('require'):
if 'api' in require.attrib and require.attrib['api'] not in apis:
continue
# A special case for EXT_texture_storage
filter_out_comment = "Supported only if GL_EXT_direct_state_access is supported"
if 'comment' in require.attrib and require.attrib['comment'] == filter_out_comment:
continue
extension_commands = require.findall('command')
ext_cmd_names += [command.attrib['name'] for command in extension_commands]
self.ext_data[extension_name] = sorted(ext_cmd_names)
for extension_name, ext_cmd_names in sorted(self.ext_data.items()):
# Detect and filter duplicate extensions.
dupes = []
for ext_cmd in ext_cmd_names:
if ext_cmd in self.all_cmd_names.get_all_commands():
dupes.append(ext_cmd)
for dupe in dupes:
ext_cmd_names.remove(dupe)
self.ext_data[extension_name] = sorted(ext_cmd_names)
self.ext_dupes[extension_name] = dupes
self.all_cmd_names.add_commands(ext_annotations[extension_name], ext_cmd_names)
def GetEnums(self, override_prefix=None):
cmd_names = []
for cmd in self.all_cmd_names.get_all_commands():
stripped = strip_api_prefix(cmd)
prefix = override_prefix or cmd[:(len(cmd) - len(stripped))]
cmd_names.append(
('%s%s' % (prefix.upper(), stripped), '%s%s' % (prefix.lower(), stripped)))
return cmd_names
class EntryPoints:
def __init__(self, api, xml, commands):
self.api = api
self._cmd_info = []
for command_node in xml.all_commands:
cmd_name = get_cmd_name(command_node)
if api == apis.WGL:
cmd_name = cmd_name if cmd_name.startswith('wgl') else 'wgl' + cmd_name
if cmd_name not in commands:
continue
param_text = ["".join(param.itertext()) for param in command_node.findall('param')]
# Treat (void) as ()
if len(param_text) == 1 and param_text[0].strip() == 'void':
param_text = []
proto = command_node.find('proto')
proto_text = "".join(proto.itertext())
self._cmd_info.append((cmd_name, command_node, param_text, proto_text))
def get_infos(self):
return self._cmd_info
def GetEGL():
egl = RegistryXML('egl.xml', 'egl_angle_ext.xml')
for major_version, minor_version in EGL_VERSIONS:
version = "%d_%d" % (major_version, minor_version)
name_prefix = "EGL_VERSION_"
feature_name = "%s%s" % (name_prefix, version)
egl.AddCommands(feature_name, version)
egl.AddExtensionCommands(supported_egl_extensions, ['egl'])
return egl
def GetGLES():
gles = RegistryXML('gl.xml', 'gl_angle_ext.xml')
for major_version, minor_version in GLES_VERSIONS:
version = "{}_{}".format(major_version, minor_version)
name_prefix = "GL_ES_VERSION_"
if major_version == 1:
name_prefix = "GL_VERSION_ES_CM_"
feature_name = "{}{}".format(name_prefix, version)
gles.AddCommands(feature_name, version)
gles.AddExtensionCommands(supported_extensions, ['gles2', 'gles1'])
return gles
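# Illustrative usage sketch (not part of the original script): the helpers above can
# be combined to enumerate every known GLES entry point, e.g.
#
#   gles = GetGLES()
#   all_cmds = gles.all_cmd_names.get_all_commands()
#   print(len(all_cmds))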
|
1fa1c61c3913f82adfc7acecea9f80e4215b919e
|
08fb252e3f70e245cdd0e5a8fed47f6fada8b6f8
|
/vplanet/quantity.py
|
2a843bebb80c503d92b5dc5048c1115e55f3993d
|
[
"MIT"
] |
permissive
|
VirtualPlanetaryLaboratory/vplanet
|
3bba2126520cbe7bee1a512f87435064d3545517
|
cb683af69e80e07bc17c06e45678effdc98fc19a
|
refs/heads/main
| 2023-08-31T04:13:58.700282
| 2023-08-30T05:10:20
| 2023-08-30T05:10:20
| 138,067,409
| 128
| 54
|
MIT
| 2023-09-06T21:30:37
| 2018-06-20T17:53:00
|
C
|
UTF-8
|
Python
| false
| false
| 10,909
|
py
|
quantity.py
|
# -*- coding: utf-8 -*-
import numbers
import re
import astropy
import astropy.units as u
import numpy as np
from astropy.units.core import Unit, UnitBase, UnitsError, dimensionless_unscaled
from astropy.utils.misc import isiterable
# TODO: There may be other methods in
# https://github.com/astropy/astropy/blob/master/astropy/units/quantity.py
# that we'll need to subclass in order to get `tags` to work
class VPLANETQuantity(u.Quantity):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: http://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(
cls,
value,
unit=None,
tags={},
dtype=None,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, u.Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if value.dtype.kind in "iu":
dtype = float
return np.array(
value,
dtype=dtype,
copy=copy,
order=order,
subok=True,
ndmin=ndmin,
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second parts adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
'Cannot parse "{}" as a {}. It does not '
"start with a number.".format(value, cls.__name__)
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (
isiterable(value)
and len(value) > 0
and all(isinstance(v, VPLANETQuantity) for v in value)
):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
try:
value_unit = value.unit
except AttributeError:
value_unit = None
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
"The unit attribute {!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{}".format(value.unit, exc)
)
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value,
dtype=dtype,
copy=copy,
order=order,
subok=False,
ndmin=ndmin,
)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
try:
qcls = unit._quantity_class
except AttributeError:
qcls = cls
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
# Custom tags
        value.tags = tags if tags is not None else {}
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
try:
unit = obj._unit
except AttributeError:
unit = None
else:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
# Custom tags
try:
self.tags = obj.tags
except AttributeError:
self.tags = {}
class NumpyQuantity(np.ndarray):
"""
A custom subclass of numpy ndarray with tags.
"""
    def __new__(cls, input_array, tags=None, unit=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
        obj.tags = {} if tags is None else tags
obj.unit = unit
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
        try:
            self.tags = obj.tags
        except AttributeError:
            self.tags = {}
try:
self.unit = obj.unit
except AttributeError:
self.unit = None
def __array_wrap__(self, out_arr, context=None):
# Call the parent
return np.ndarray.__array_wrap__(self, out_arr, context)
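# Usage sketch (illustrative): a NumpyQuantity behaves like a plain ndarray
# but carries ``tags`` and ``unit`` metadata alongside the data.
#
#     arr = NumpyQuantity([1.0, 2.0, 3.0], tags={"source": "demo"}, unit="m")
#     arr.tags   # -> {'source': 'demo'}
#     arr.unit   # -> 'm'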
|
3fb626fba337aa4f821a2ffdd60fdbfda17eb066
|
22a8ec81d6240acbf6503458dbd5975e677e7448
|
/tests/test_trust_list.py
|
4e2ac629da30f9eb8aaba2689f2218984ccbe5d6
|
[
"MIT"
] |
permissive
|
wbond/oscrypto
|
3f09f5e30c643aa4c3745851e0d0bd0d72d1cd14
|
1547f535001ba568b239b8797465536759c742a3
|
refs/heads/master
| 2023-08-31T10:06:11.798975
| 2023-08-23T15:53:33
| 2023-08-23T15:53:33
| 36,823,752
| 331
| 69
|
MIT
| 2023-08-23T11:03:06
| 2015-06-03T18:53:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
test_trust_list.py
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import hashlib
import os
import unittest
import sys
from oscrypto import trust_list
from asn1crypto import x509, pem
from ._unittest_compat import patch
patch()
if sys.version_info < (3,):
str_cls = unicode # noqa
byte_cls = str
else:
str_cls = str
byte_cls = bytes
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
digicert_ca_path = os.path.join(fixtures_dir, 'digicert_ca.crt')
class TrustListTests(unittest.TestCase):
def test_get_list(self):
trust_list.clear_cache()
certs = trust_list.get_list()
self.assertIsInstance(certs, list)
self.assertLess(10, len(certs))
for cert, trust_oids, reject_oids in certs:
self.assertIsInstance(cert, x509.Certificate)
self.assertIsInstance(trust_oids, set)
self.assertIsInstance(reject_oids, set)
cert.native
def test_get_list_callback(self):
trust_list.clear_cache()
lambda_data = {'calls': 0, 'reasons': 0, 'certs': {}}
def cb(cert, reason):
if reason is not None:
self.assertIsInstance(reason, str_cls)
lambda_data['reasons'] += 1
self.assertIsInstance(cert, x509.Certificate)
sha1 = hashlib.sha1(cert.dump()).digest()
message = None
if sha1 in lambda_data['certs']:
message = 'Certificate (%s) already passed to callback' % cert.subject.human_friendly
self.assertNotIn(sha1, lambda_data['certs'], message)
lambda_data['certs'][sha1] = True
lambda_data['calls'] += 1
certs = trust_list.get_list(cert_callback=cb)
self.assertIsInstance(certs, list)
self.assertLess(10, len(certs))
self.assertLessEqual(len(certs), lambda_data['calls'])
self.assertEqual(lambda_data['calls'] - len(certs), lambda_data['reasons'])
for cert, trust_oids, reject_oids in certs:
self.assertIsInstance(cert, x509.Certificate)
self.assertIsInstance(trust_oids, set)
self.assertIsInstance(reject_oids, set)
cert.native
def test_get_list_mutate(self):
trust_list.clear_cache()
certs = trust_list.get_list()
certs2 = trust_list.get_list()
with open(digicert_ca_path, 'rb') as f:
_, _, digicert_ca_bytes = pem.unarmor(f.read())
digicert_ca_cert = x509.Certificate.load(digicert_ca_bytes)
certs.append(digicert_ca_cert)
self.assertNotEqual(certs2, certs)
def test_get_path(self):
trust_list.clear_cache()
certs = trust_list.get_path()
with open(certs, 'rb') as f:
cert_data = f.read()
self.assertEqual(True, pem.detect(cert_data))
self.assertLess(10240, len(cert_data))
|
5fa5c9966074332d2ee327869e0bd1fceb756437
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/html_parsing/jut_su/get_user_achievements.py
|
cacd26be6f871e50736add0ba2c66e4bcf17fc18
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
get_user_achievements.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import re
import time
from dataclasses import dataclass
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from common import session
@dataclass
class Achievement:
icon_url: str
title: str
description: str
date_str: str
video_url: str
def get_achievements(
url: str,
start_page: int = 1,
need_total_items: int = None,
reversed: bool = False,
) -> list[Achievement]:
data = {
"ajax_load": "yes",
"start_from_page": start_page,
}
items = []
while True:
rs = session.post(url, data=data)
rs.raise_for_status()
root = BeautifulSoup(rs.text, "html.parser")
achiv_items = root.select(".achiv_all_in")
if not achiv_items:
break
for item in achiv_items:
icon_url = None
icon_style = item.select_one(".achiv_all_icon")["style"]
match = re.search(r"url\(.(.+).\)", icon_style)
if not match:
print("[-] Not found icon!")
else:
icon_url = match.group(1)
title = item.select_one(".achiv_all_text_title").get_text(strip=True)
description = item.select_one(".achiv_all_text_description").get_text(
strip=True
)
tag_date_a = item.select_one(".achiv_all_text_date > a")
date_str = tag_date_a.get_text(strip=True)
video_url = urljoin(rs.url, tag_date_a["href"])
items.append(Achievement(icon_url, title, description, date_str, video_url))
if need_total_items and len(items) >= need_total_items:
items = items[:need_total_items]
break
data["start_from_page"] += 1
time.sleep(1)
if reversed:
items.reverse()
return items
if __name__ == "__main__":
url = "https://jut.su/user/gil9red/achievements/"
items = get_achievements(url)
print(f"Achievements ({len(items)}):")
for item in items:
print(f" {item}")
"""
Achievements (2690):
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/5699.jpg', title='Сильнее, чем он', description='Нойтора против Ичиго', date_str='сегодня в 14:07', video_url='https://jut.su/bleach/episode-190.html')
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/5698.jpg', title='Velonica', description='Посмотрите 9 опенинг', date_str='сегодня в 13:54', video_url='https://jut.su/bleach/episode-190.html')
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/5664.jpg', title='Chu-Bura', description='Посмотрите 8 опенинг', date_str='сегодня в 13:27', video_url='https://jut.su/bleach/episode-168.html')
...
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/1607.jpg', title='Сага начинается', description='Джонатан встречает Дио', date_str='2 ноября 2019', video_url='https://jut.su/jojo-bizarre-adventure/season-1/episode-1.html')
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/1606.jpg', title='Юноша из низов', description='Вы познакомились с Дио', date_str='2 ноября 2019', video_url='https://jut.su/jojo-bizarre-adventure/season-1/episode-1.html')
Achievement(icon_url='https://gen.jut.su/uploads/achievements/icons/1605.jpg', title='Благородный ДжоДжо', description='Вы познакомились с Джонатаном', date_str='2 ноября 2019', video_url='https://jut.su/jojo-bizarre-adventure/season-1/episode-1.html')
"""
|
ce24cd65aa58c45e864c9e5176dd467c415f86e8
|
d88458a65a173999df390117005fa813735e5fe2
|
/astroquery/heasarc/tests/test_heasarc_remote.py
|
a1a489b878e898e20a67dba3be0e997559e156bd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
astropy/astroquery
|
9a2793826229ba4b41ec3607ca77832036a534e9
|
51316d7417d7daf01a8b29d1df99037b9227c2bc
|
refs/heads/main
| 2023-09-01T20:52:41.625935
| 2023-08-29T23:22:40
| 2023-08-29T23:22:40
| 4,787,269
| 636
| 365
|
BSD-3-Clause
| 2023-09-14T21:56:33
| 2012-06-25T20:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,207
|
py
|
test_heasarc_remote.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import requests
from ...heasarc import Heasarc
from .conftest import MockResponse, parametrization_local_save_remote, skycoord_3C_273
from astroquery.exceptions import NoResultsWarning
from astropy.coordinates import SkyCoord
from astropy import units as u
@parametrization_local_save_remote
class TestHeasarc:
def test_custom_args(self):
object_name = 'Crab'
mission = 'intscw'
heasarc = Heasarc()
table = heasarc.query_object(object_name,
mission=mission,
radius='1 degree',
time="2020-09-01 .. 2020-12-01",
resultmax=10,
good_isgri=">1000",
cache=False
)
assert len(table) > 0
def test_filter_custom_args(self):
object_name = 'Crab'
mission = 'intscw'
heasarc = Heasarc()
with pytest.raises(ValueError):
heasarc.query_object(object_name,
mission=mission,
radius='1 degree',
time="2020-09-01 .. 2020-12-01",
resultmax=10,
very_good_isgri=">1000",
)
def test_basic_example(self):
mission = 'rosmaster'
object_name = '3c273'
heasarc = Heasarc()
table = heasarc.query_object(object_name, mission=mission)
assert len(table) == 63
def test_mission_list(self):
heasarc = Heasarc()
missions = heasarc.query_mission_list()
# Assert that there are indeed a large number of tables
# Number of tables could change, but should be > 900 (currently 956)
assert len(missions) > 900
def test_mission_cols(self):
heasarc = Heasarc()
mission = 'rosmaster'
cols = heasarc.query_mission_cols(mission=mission)
assert len(cols) == 29
# Test that the cols list contains known names
assert 'EXPOSURE' in cols
assert 'RA' in cols
assert 'DEC' in cols
assert 'SEARCH_OFFSET_' in cols
def test_query_object_async(self):
mission = 'rosmaster'
object_name = '3c273'
heasarc = Heasarc()
response = heasarc.query_object_async(object_name, mission=mission)
assert response is not None
assert isinstance(response, (requests.models.Response, MockResponse))
def test_query_region_async(self):
heasarc = Heasarc()
mission = 'rosmaster'
response = heasarc.query_region_async(
skycoord_3C_273, mission=mission, radius="1 degree")
assert response is not None
assert isinstance(response, (requests.models.Response, MockResponse))
def test_query_region(self):
heasarc = Heasarc()
mission = 'rosmaster'
table = heasarc.query_region(
skycoord_3C_273, mission=mission, radius="1 degree")
assert len(table) == 63
def test_query_region_nohits(self):
"""
Regression test for #2560: HEASARC returns a FITS file as a null result
"""
heasarc = Heasarc()
with pytest.warns(NoResultsWarning, match='No matching rows were found in the query.'):
# This was an example coordinate that returned nothing
# Since Fermi is still active, it is possible that sometime in the
# future an event will occur here.
table = heasarc.query_region(SkyCoord(0.28136*u.deg, -0.09789*u.deg, frame='fk5'),
mission='fermilpsc', radius=0.1*u.deg)
assert len(table) == 0
# this is to check that the header comments got parsed correctly
# I'm not certain that they will always be returned in the same order,
# so it may be necessary in the future to change this part of the test
assert 'heasarc_fermilpsc' in table.meta['COMMENT'][0]
|
219630ee0d39853389573d44f9faf0450b9a8444
|
b4f442bcca18daa73f87e4475a32bfbcf52a74c8
|
/examples/torchdata_train_example.py
|
5df4c62b3e0383f7ca3ef330c557ffe57eb05a49
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/tnt
|
2f97048783e1d6fb37edbf01e69ceb8a6e65b31d
|
1f703d5f8c0bb8f06f4ff42d8e30b2efc05856e1
|
refs/heads/master
| 2023-09-04T08:51:44.536816
| 2023-09-01T20:11:43
| 2023-09-01T20:11:43
| 76,109,394
| 1,654
| 288
|
NOASSERTION
| 2023-09-14T18:44:24
| 2016-12-10T11:49:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,584
|
py
|
torchdata_train_example.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import sys
import tempfile
from argparse import Namespace
from typing import List, Tuple
import torch
import torch.nn as nn
from torch.utils.data.datapipes.map.combinatorics import ShufflerIterDataPipe
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper
from torcheval.metrics import BinaryAccuracy
from torchtnt.framework.state import State
from torchtnt.framework.train import train
from torchtnt.framework.unit import TrainUnit
from torchtnt.utils import copy_data_to_device, init_from_env, seed, TLRScheduler
from torchtnt.utils.loggers import TensorBoardLogger
_logger: logging.Logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
Batch = Tuple[torch.Tensor, torch.Tensor]
def prepare_model(input_dim: int, device: torch.device) -> nn.Module:
"""
Instantiate a nn.Module which will define the architecture of your model.
See https://pytorch.org/docs/stable/generated/torch.nn.Module.html for docs.
"""
return nn.Linear(input_dim, 1, device=device)
# pyre-fixme[24]: Generic type `ShufflerIterDataPipe` expects 1 type parameter.
def _generate_datapipe(num_samples: int, input_dim: int) -> ShufflerIterDataPipe:
"""
Returns a shuffled datapipe of random inputs and labels.
"""
data = IterableWrapper(
[torch.rand(input_dim, input_dim) for _ in range(num_samples)]
)
labels = IterableWrapper(
[
torch.rand(
input_dim,
)
for _ in range(num_samples)
]
)
train_data_pipe = data.zip(labels).shuffle()
return train_data_pipe
def prepare_dataloader(
num_samples: int, input_dim: int, num_workers: int, device: torch.device
) -> DataLoader2:
"""
Prepare the datapipe and instantiate the DataLoader2 for training.
"""
# pin memory enables faster host to GPU copies
# enable_pin_memory = device.type == "cuda"
# To be replaced by pin_memory DataPipe https://github.com/pytorch/data/issues/1013
train_data_pipe = _generate_datapipe(num_samples, input_dim)
dataloader = DataLoader2(
datapipe=train_data_pipe,
reading_service=MultiProcessingReadingService(num_workers=num_workers),
)
return dataloader
class MyTrainUnit(TrainUnit[Batch]):
def __init__(
self,
module: torch.nn.Module,
optimizer: torch.optim.Optimizer,
lr_scheduler: TLRScheduler,
device: torch.device,
train_accuracy: BinaryAccuracy,
tb_logger: TensorBoardLogger,
log_every_n_steps: int,
) -> None:
super().__init__()
self.module = module
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.device = device
# create an accuracy Metric to compute the accuracy of training
self.train_accuracy = train_accuracy
self.log_every_n_steps = log_every_n_steps
self.tb_logger = tb_logger
def train_step(self, state: State, data: Batch) -> None:
data = copy_data_to_device(data, self.device)
inputs, targets = data
# convert targets to float Tensor for binary_cross_entropy_with_logits
targets = targets.float()
outputs = self.module(inputs)
outputs = torch.squeeze(outputs)
loss = torch.nn.functional.binary_cross_entropy_with_logits(outputs, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
# update metrics & logs
self.train_accuracy.update(outputs, targets)
step_count = self.train_progress.num_steps_completed
if (step_count + 1) % self.log_every_n_steps == 0:
accuracy = self.train_accuracy.compute()
self.tb_logger.log("loss", loss, step_count)
self.tb_logger.log("accuracy", accuracy, step_count)
def on_train_epoch_end(self, state: State) -> None:
# compute and log the metrics at the end of epoch
step_count = self.train_progress.num_steps_completed
accuracy = self.train_accuracy.compute()
self.tb_logger.log("accuracy_epoch", accuracy, step_count)
# reset the metric at the end of every epoch
self.train_accuracy.reset()
# step the learning rate scheduler
self.lr_scheduler.step()
def main(argv: List[str]) -> None:
args = get_args(argv)
seed(args.seed)
# device and process group initialization
device = init_from_env()
path = tempfile.mkdtemp()
tb_logger = TensorBoardLogger(path)
model = prepare_model(args.input_dim, device)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
train_accuracy = BinaryAccuracy(device=device)
dataloader = prepare_dataloader(
args.num_batches_per_epoch,
args.input_dim,
args.num_reading_service_workers,
device,
)
train_unit = MyTrainUnit(
module=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
device=device,
train_accuracy=train_accuracy,
tb_logger=tb_logger,
log_every_n_steps=args.log_every_n_steps,
)
train(
train_unit,
train_dataloader=dataloader,
max_epochs=args.max_epochs,
)
def get_args(argv: List[str]) -> Namespace:
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--input-dim", type=int, default=32, help="input dimension")
parser.add_argument("--max-epochs", type=int, default=2, help="training epochs")
parser.add_argument(
"--num-batches-per-epoch",
type=int,
default=1024,
help="number of batches per epoch",
)
parser.add_argument(
"--num-reading-service-workers",
type=int,
default=2,
help="num of reading service workers",
)
parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
parser.add_argument(
"--log-every-n-steps", type=int, default=10, help="log every n steps"
)
return parser.parse_args(argv)
if __name__ == "__main__":
main(sys.argv[1:])
|
16b6859c82d895c845fad4c20ffa674ff8f63443
|
c86612b6d082cb258dc446d2bff3f47dbe79e2a5
|
/pyexfil/Stega/zipception/__init__.py
|
b2fe52c971debaab23aefb9e5930be1810c84bb1
|
[
"MIT"
] |
permissive
|
ytisf/PyExfil
|
a596d098324b5f436b52a7f385500ef29b883e45
|
d544783bc7fcab900034ece10a48ac75ee333fe4
|
refs/heads/master
| 2023-08-23T10:37:48.316985
| 2023-02-17T03:12:33
| 2023-02-17T03:12:33
| 27,238,179
| 700
| 152
|
MIT
| 2020-12-03T15:18:46
| 2014-11-27T19:06:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
__init__.py
|
import os
import sys
import random
from progressbar import *
from zipfile import ZipFile
from pyexfil.includes.encryption_wrappers import PYEXFIL_DEFAULT_PASSWORD
NUMBER_OF_ITERATIONS = random.randint(1345, 6548)
def _get_files_from_dir(dir_page):
file_paths = []
for root, directories, files in os.walk(dir_page):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
def _decompress(zip_h):
zip_h.extractall()
return
class Broker:
def __init__(self, file_path, key=PYEXFIL_DEFAULT_PASSWORD):
self.file_path = file_path
self.key = key
self.current_iteration = 0
self.current_file = None
self.end = False
def Run(self):
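        # Unpack the nested archives layer by layer: the Sender alternates the
        # inner archive name between "True.zip" and "False.zip", so extraction
        # continues until the extracted file carries any other name (the real
        # payload), at which point the loop stops.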
self.current_file = self.file_path
while self.end is False:
zipfile = ZipFile(self.current_file, 'r')
if self.key is not None:
zipfile.setpassword(self.key)
_decompress(zipfile)
self.current_file = zipfile.filelist[0].filename
self.current_iteration += 1
if self.current_file != "True.zip" and self.current_file != "False.zip":
self.end = True
sys.stdout.write("Finished with %s iterations.\n" % self.current_iteration)
class Sender:
def __init__(self, folder_path, key=PYEXFIL_DEFAULT_PASSWORD, iterations=NUMBER_OF_ITERATIONS):
self.all_files = _get_files_from_dir(folder_path)
self.key = key
self.iterations = iterations
def Run(self):
sys.stdout.write("Starting with %s iterations.\n" % self.iterations)
widgets = ['Compressing: ', Percentage(), ' ', Bar(marker='#', left='[', right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=self.iterations)
pbar.start()
for i in range(1, self.iterations):
even = (i % 2 == 0)
pbar.update(i)
with ZipFile('%s.zip' % even, 'w') as zip:
if self.key is not None:
zip.setpassword(bytes(self.key))
for file in self.all_files:
zip.write(file)
self.all_files = ['%s.zip' % (even)]
try:
os.remove('%s.zip' % str(not even))
except:
pass
pbar.finish()
sys.stdout.write("Completed with %s iterations.\n" % self.iterations)
|
8794d107b4363fe89643189de6a4ed3bd6e69e0d
|
48fdd1305ea3b6aad7537909b2ee0764c991d4e8
|
/workflow/scripts/enhance_bcf.py
|
1067247b408104da3c280b5f8d3baae5aef2050c
|
[
"Apache-2.0"
] |
permissive
|
cbg-ethz/V-pipe
|
e62418cbe4f45f040bb6c071a08144dedf98281d
|
8c4599509955b33f761ded2889d68777628a4f12
|
refs/heads/master
| 2023-08-22T05:34:52.941749
| 2023-03-17T21:37:28
| 2023-03-17T22:11:55
| 87,789,709
| 115
| 38
|
Apache-2.0
| 2023-05-11T18:34:46
| 2017-04-10T09:01:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
enhance_bcf.py
|
#!/usr/bin/env python3
"""Amend genotype field to contain all variants."""
import sys
from cyvcf2 import VCF, Writer
def main(fname_in, fname_out, ambiguous_base_coverage_threshold):
"""
ambiguous_base_coverage_threshold:
frequency threshold to include a variant in computation of ambiguous code
"""
vcf_reader = VCF(fname_in)
vcf_writer = Writer(fname_out, vcf_reader)
for variant in vcf_reader:
base_list = [variant.REF] + variant.ALT
coverage_list = variant.INFO.get("AD")
total_coverage = sum(coverage_list)
assert len(base_list) == len(coverage_list)
# genotype 0 is reference (base is not really needed)
genotype = [
i
for i, (base, coverage) in enumerate(zip(base_list, coverage_list))
if coverage / total_coverage >= ambiguous_base_coverage_threshold
]
variant.genotypes = [[*genotype, False]]
vcf_writer.write_record(variant)
vcf_writer.close()
vcf_reader.close()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], float(sys.argv[3]))
|
a008385237abb2014f09fddede075d4cd53bd925
|
5c1746c4ae9f5eb4c94c9b3a70a4d3feb966ceda
|
/pcapkit/protocols/data/internet/ipv6_route.py
|
ab8848f305002bf624ad31b6df8743e320800d9a
|
[
"BSD-3-Clause"
] |
permissive
|
JarryShaw/PyPCAPKit
|
8b53c76cf54f2ef1a9e4d0a7aeb3d52605dc1d5a
|
a6fe49ec58f09e105bec5a00fb66d9b3f22730d9
|
refs/heads/main
| 2023-08-29T12:49:58.611378
| 2023-08-28T14:05:43
| 2023-08-28T14:05:43
| 109,791,841
| 204
| 29
|
BSD-3-Clause
| 2023-09-11T17:09:06
| 2017-11-07T05:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
ipv6_route.py
|
# -*- coding: utf-8 -*-
"""data model for IPv6 Routing Header"""
from typing import TYPE_CHECKING
from pcapkit.corekit.infoclass import info_final
from pcapkit.protocols.data.data import Data
if TYPE_CHECKING:
from ipaddress import IPv6Address
from pcapkit.const.ipv6.routing import Routing
from pcapkit.const.reg.transtype import TransType
__all__ = [
'IPv6_Route',
'UnknownType', 'SourceRoute', 'Type2', 'RPL',
]
class IPv6_Route(Data):
"""Data model for IPv6-Route protocol."""
#: Next header.
next: 'TransType'
#: Header extension length.
length: 'int'
#: Routing type.
type: 'Routing'
#: Segments left.
seg_left: 'int'
@info_final
class UnknownType(IPv6_Route):
"""Data model for IPv6-Route unknown type."""
#: Data.
data: 'bytes'
if TYPE_CHECKING:
def __init__(self, next: 'TransType', length: 'int', type: 'Routing', seg_left: 'int',
data: 'bytes') -> 'None': ... # pylint: disable=unused-argument,multiple-statements,super-init-not-called,redefined-builtin,line-too-long
@info_final
class SourceRoute(IPv6_Route):
"""Data model for IPv6-Route Source Route data type."""
#: Source addresses.
ip: 'tuple[IPv6Address, ...]'
if TYPE_CHECKING:
def __init__(self, next: 'TransType', length: 'int', type: 'Routing', seg_left: 'int',
ip: 'tuple[IPv6Address, ...]') -> 'None': ... # pylint: disable=unused-argument,multiple-statements,super-init-not-called,redefined-builtin,line-too-long
@info_final
class Type2(IPv6_Route):
"""Data model for IPv6-Route Type 2 data type."""
#: Address.
ip: 'IPv6Address'
if TYPE_CHECKING:
def __init__(self, next: 'TransType', length: 'int', type: 'Routing', seg_left: 'int',
ip: 'IPv6Address') -> 'None': ... # pylint: disable=unused-argument,multiple-statements,super-init-not-called,redefined-builtin,line-too-long
@info_final
class RPL(IPv6_Route):
"""Data model for RPL Source data type."""
#: CmprI.
cmpr_i: 'int'
#: CmprE.
cmpr_e: 'int'
#: Pad.
pad: 'int'
#: Addresses.
ip: 'tuple[IPv6Address | bytes, ...]'
if TYPE_CHECKING:
def __init__(self, next: 'TransType', length: 'int', type: 'Routing', seg_left: 'int',
cmpr_i: 'int', cmpr_e: 'int', pad: 'int', ip: 'tuple[IPv6Address | bytes, ...]') -> 'None': ... # pylint: disable=unused-argument,multiple-statements,super-init-not-called,redefined-builtin,line-too-long
|
bcf1b9c13fa954c345b9ae9778b1cea8e402d049
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/hypertext/src/poincare.py
|
f4352e3f34ff59bdfc8dd2bfbaf3ca6bfe02756d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,596
|
py
|
poincare.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""poincare file"""
import mindspore.numpy as mnp
from mindspore.nn import Cell, Norm
from mindspore.ops import Shape, ReduceSum, Sqrt, ExpandDims, Tanh, Transpose, matmul, Pow, Reshape, clip_by_value
import mindspore.common.dtype as mstype
from src.math_utils import Artanh
class LorentzFactors(Cell):
"""lorentz_factors class"""
def __init__(self, min_norm):
"""init"""
super(LorentzFactors, self).__init__()
self.min_norm = min_norm
self.norm = Norm(axis=-1)
def construct(self, x):
"""class construction"""
x_norm = self.norm(x)
return 1.0 / (1.0 - x_norm ** 2 + self.min_norm)
class ClampMin(Cell):
"""clamp_min class"""
def __init__(self):
"""init fun"""
super(ClampMin, self).__init__()
self.shape = Shape()
def construct(self, tensor, min1):
"""class construction"""
min_mask = (tensor <= min1)
min_mask1 = (tensor >= min1)
min_add = mnp.ones(self.shape(tensor)) * min1 * min_mask
return tensor * min_mask1 + min_add
class Proj(Cell):
"""proj class"""
def __init__(self, min_norm):
"""init fun"""
super(Proj, self).__init__()
self.clamp_min = ClampMin()
self.min_norm = min_norm
self.norm_k = Norm(axis=-1, keep_dims=True)
self.maxnorm = 1 - 4e-3
def construct(self, x, c):
"""class construction"""
norm = self.clamp_min(self.norm_k(x), self.min_norm)
maxnorm = self.maxnorm / (c ** 0.5)
cond = norm > maxnorm
projected = x / norm * maxnorm
return mnp.where(cond, projected, x)
class Clamp(Cell):
"""clamp class"""
def __init__(self):
super(Clamp, self).__init__()
self.shape = Shape()
def construct(self, tensor, min1, max1):
"""class construction"""
return clip_by_value(tensor, min1, max1)
class Logmap0(Cell):
"""logmap0 class"""
def __init__(self, min_norm):
"""init fun"""
super(Logmap0, self).__init__()
self.min_norm = min_norm
self.norm_k = Norm(axis=-1, keep_dims=True)
self.artanh = Artanh()
self.norm_k = Norm(axis=-1, keep_dims=True)
self.clamp_min = ClampMin()
def construct(self, p, c):
"""class construction"""
sqrt_c = c ** 0.5
p_norm = self.clamp_min(self.norm_k(p), self.min_norm)
scale = 1. / sqrt_c * self.artanh(sqrt_c * p_norm) / p_norm
return scale * p
class KleinToPoincare(Cell):
"""klein to poincare class"""
def __init__(self, min_norm):
"""init"""
super(KleinToPoincare, self).__init__()
self.min_norm = min_norm
self.sqrt = Sqrt()
self.sum = ReduceSum(keep_dims=True)
self.proj = Proj(self.min_norm)
def construct(self, x, c):
"""class construction"""
x_poincare = x / (1.0 + self.sqrt(1.0 - self.sum(x * x, -1)))
x_poincare = self.proj(x_poincare, c)
return x_poincare
class ToKlein(Cell):
"""to klein class"""
def __init__(self, min_norm):
"""init fun"""
super(ToKlein, self).__init__()
self.min_norm = min_norm
self.sum = ReduceSum(keep_dims=True)
self.klein_constraint = KleinConstraint(self.min_norm)
def construct(self, x, c):
"""class construction"""
x_2 = self.sum(x * x, -1)
x_klein = 2 * x / (1.0 + x_2)
x_klein = self.klein_constraint(x_klein)
return x_klein
class KleinConstraint(Cell):
"""klein constraint class"""
def __init__(self, min_norm):
"""init fun"""
super(KleinConstraint, self).__init__()
self.norm = Norm(axis=-1)
self.min_norm = min_norm
self.maxnorm = 1 - 4e-3
self.shape = Shape()
self.reshape = Reshape()
def construct(self, x):
"""class construction"""
last_dim_val = self.shape(x)[-1]
norm = self.reshape(self.norm(x), (-1, 1))
maxnorm = self.maxnorm
cond = norm > maxnorm
x_reshape = self.reshape(x, (-1, last_dim_val))
projected = x_reshape / (norm + self.min_norm) * maxnorm
x_reshape = mnp.where(cond, projected, x_reshape)
x = self.reshape(x_reshape, self.shape(x))
return x
class EinsteinMidpoint(Cell):
"""einstein mindpoint class"""
def __init__(self, min_norm):
"""init fun"""
super(EinsteinMidpoint, self).__init__()
self.to_klein = ToKlein(min_norm)
self.lorentz_factors = LorentzFactors(min_norm)
self.sum = ReduceSum(keep_dims=True)
self.unsqueeze = ExpandDims()
self.sumFalse = ReduceSum(keep_dims=False)
self.klein_constraint = KleinConstraint(min_norm)
self.klein_to_poincare = KleinToPoincare(min_norm)
def construct(self, x, c):
"""class construction"""
x = self.to_klein(x, c)
x_lorentz = self.lorentz_factors(x)
x_norm = mnp.norm(x, axis=-1)
# deal with pad value
x_lorentz = (1.0 - (x_norm == 0.0).astype(mstype.float32)) * x_lorentz
x_lorentz_sum = self.sum(x_lorentz, -1)
x_lorentz_expand = self.unsqueeze(x_lorentz, -1)
x_midpoint = self.sumFalse(x_lorentz_expand * x, 1) / x_lorentz_sum
x_midpoint = self.klein_constraint(x_midpoint)
x_p = self.klein_to_poincare(x_midpoint, c)
return x_p
class ClampTanh(Cell):
"""clamp tanh class"""
def __init__(self):
"""init fun"""
super(ClampTanh, self).__init__()
self.clamp = Clamp()
self.tanh = Tanh()
def construct(self, x, c=15):
"""class construction"""
return self.tanh(self.clamp(x, -c, c))
class MobiusMatvec(Cell):
"""mobius matvec class"""
def __init__(self, min_norm):
"""init fun"""
super(MobiusMatvec, self).__init__()
self.min_norm = min_norm
self.norm_k = Norm(axis=-1, keep_dims=True)
self.artanh = Artanh()
self.norm_k = Norm(axis=-1, keep_dims=True)
self.clamp_min = ClampMin()
self.transpose = Transpose()
self.clamp_tanh = ClampTanh()
def construct(self, m, x, c):
"""class construction"""
sqrt_c = c ** 0.5
x_norm = self.clamp_min(self.norm_k(x), self.min_norm)
mx = matmul(x, self.transpose(m, (1, 0)))
        mx_norm = self.clamp_min(self.norm_k(mx), self.min_norm)
t1 = self.artanh(sqrt_c * x_norm)
t2 = self.clamp_tanh(mx_norm / x_norm * t1)
res_c = t2 * mx / (mx_norm * sqrt_c)
cond = mnp.array([[0]] * len(mx))
res_0 = mnp.zeros(1)
res = mnp.where(cond, res_0, res_c)
return res
class Expmap0(Cell):
"""expmap0 class"""
def __init__(self, min_norm):
"""init fun"""
super(Expmap0, self).__init__()
self.clamp_min = ClampMin()
self.min_norm = min_norm
self.clamp_tanh = ClampTanh()
self.norm_k = Norm(axis=-1, keep_dims=True)
def construct(self, u, c):
"""constructfun"""
sqrt_c = c ** 0.5
u_norm = self.clamp_min(self.norm_k(u), self.min_norm)
gamma_1 = self.clamp_tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return gamma_1
class MobiusAdd(Cell):
"""mobius add"""
def __init__(self, min_norm):
"""init fun"""
super(MobiusAdd, self).__init__()
self.pow = Pow()
self.sum = ReduceSum(keep_dims=True)
self.clamp_min = ClampMin()
self.min_norm = min_norm
def construct(self, x, y, c, dim=-1):
"""constructfun"""
x2 = self.sum(self.pow(x, 2), dim)
y2 = self.sum(self.pow(y, 2), dim)
xy = self.sum(x * y, dim)
num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
denom = 1 + 2 * c * xy + c ** 2 * x2 * y2
return num / self.clamp_min(denom, self.min_norm)
|
10b6e22221792e8f770a037fa50f8cb34dc31d95
|
532ad1aedff8528b2e8af4e4e752f32d58b92b0d
|
/aesara/typed_list/rewriting.py
|
2ec5d412eee6b5d7a248d7d6bd73abfde8e65dad
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
aesara-devs/aesara
|
ebaa204159d4ddb94ede10580c5b8e39d114713f
|
b5a3cf45f0f6762bb4bb0e2c657f7d3822c74595
|
refs/heads/main
| 2023-08-09T10:56:56.528283
| 2023-07-15T06:15:49
| 2023-07-15T13:28:29
| 221,231,590
| 861
| 142
|
NOASSERTION
| 2023-09-05T03:16:16
| 2019-11-12T14:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 779
|
py
|
rewriting.py
|
from aesara.compile import optdb
from aesara.graph.rewriting.basic import WalkingGraphRewriter, node_rewriter
from aesara.typed_list.basic import Append, Extend, Insert, Remove, Reverse
@node_rewriter([Append, Extend, Insert, Reverse, Remove], inplace=True)
def typed_list_inplace_rewrite(fgraph, node):
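    # Replace a non-inplace typed-list op (Append/Extend/Insert/Reverse/Remove)
    # with an inplace instance of the same op class; the WalkingGraphRewriter
    # registered below applies this during the "fast_run"/"inplace" passes.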
if (
isinstance(node.op, (Append, Extend, Insert, Reverse, Remove))
and not node.op.inplace
):
new_op = node.op.__class__(inplace=True)
new_node = new_op(*node.inputs)
return [new_node]
return False
optdb.register(
"typed_list_inplace_rewrite",
WalkingGraphRewriter(
typed_list_inplace_rewrite, failure_callback=WalkingGraphRewriter.warn_inplace
),
"fast_run",
"inplace",
position=60,
)
|
37b7ffbabd6d0cd2cd67370bfb1bb03d8eb41341
|
ef20f96c497d1bb80889c80ebaab1d6929634bfd
|
/src/python/tensorflow_cloud/tuner/tests/integration/distributing_cloudtuner_integration_test.py
|
592ba7107dedb26bcce25ccb3fa5619a87f16158
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/cloud
|
55315e12b3145b73040660cb9f558439ecef0c03
|
dfd9ca1fe2200d10584c09ab8c3b392c82871d99
|
refs/heads/master
| 2023-08-19T03:07:10.122202
| 2022-12-22T21:33:50
| 2022-12-22T21:34:29
| 239,587,597
| 385
| 157
|
Apache-2.0
| 2023-04-27T20:55:18
| 2020-02-10T18:51:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,364
|
py
|
distributing_cloudtuner_integration_test.py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Distributing Cloud Tuner."""
import contextlib
import io
import os
import re
import keras_tuner
import tensorflow as tf
from tensorflow import keras
from tensorflow_cloud.tuner import vizier_client
from tensorflow_cloud.tuner.tuner import DistributingCloudTuner
# If input dataset is created outside tuner.search(),
# it requires eager execution even in TF 1.x.
if tf.version.VERSION.split(".")[0] == "1":
tf.compat.v1.enable_eager_execution()
# The project id to use to run tests.
_PROJECT_ID = os.environ["PROJECT_ID"]
# The GCP region in which the end-to-end test is run.
_REGION = os.environ["REGION"]
# Study ID for testing
_STUDY_ID_BASE = "dct_{}".format((os.environ["BUILD_ID"]).replace("-", "_"))
# The base docker image to use for the remote environment.
_DOCKER_IMAGE = os.environ["DOCKER_IMAGE"]
# The staging bucket to use to copy the model and data for the remote run.
_REMOTE_DIR = os.path.join("gs://", os.environ["TEST_BUCKET"], _STUDY_ID_BASE)
# The search space for hyperparameters
_HPS = keras_tuner.HyperParameters()
_HPS.Float("learning_rate", min_value=1e-4, max_value=1e-2, sampling="log")
_HPS.Int("num_layers", 2, 10)
def _load_data(dir_path=None):
"""Loads and prepares data."""
mnist_file_path = None
if dir_path:
mnist_file_path = os.path.join(dir_path, "mnist.npz")
(x, y), (val_x, val_y) = keras.datasets.mnist.load_data(mnist_file_path)
x = x.astype("float32") / 255.0
val_x = val_x.astype("float32") / 255.0
return ((x[:10000], y[:10000]), (val_x, val_y))
def _build_model(hparams):
# Note that CloudTuner does not support adding hyperparameters in
# the model building function. Instead, the search space is configured
# by passing a hyperparameters argument when instantiating (constructing)
# the tuner.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
# Build the model with number of layers from the hyperparameters
for _ in range(hparams.get("num_layers")):
model.add(keras.layers.Dense(units=64, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
# Compile the model with learning rate from the hyperparameters
model.compile(
optimizer=keras.optimizers.Adam(lr=hparams.get("learning_rate")),
loss="sparse_categorical_crossentropy",
metrics=["acc"],
)
return model
class _DistributingCloudTunerIntegrationTestBase(tf.test.TestCase):
def setUp(self):
super(_DistributingCloudTunerIntegrationTestBase, self).setUp()
self._study_id = None
def _assert_output(self, fn, regex_str):
stdout = io.StringIO()
with contextlib.redirect_stdout(stdout):
fn()
output = stdout.getvalue()
self.assertRegex(output, re.compile(regex_str, re.DOTALL))
def _assert_results_summary(self, fn):
self._assert_output(
fn, ".*Results summary.*Trial summary.*Hyperparameters.*")
def _delete_dir(self, path) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
def tearDown(self):
super(_DistributingCloudTunerIntegrationTestBase, self).tearDown()
# Delete the study used in the test, if present
if self._study_id:
service = vizier_client.create_or_load_study(
_PROJECT_ID, _REGION, self._study_id, None)
service.delete_study()
tf.keras.backend.clear_session()
# Delete log files, saved_models and other training assets
self._delete_dir(_REMOTE_DIR)
class DistributingCloudTunerIntegrationTest(
_DistributingCloudTunerIntegrationTestBase):
def setUp(self):
super(DistributingCloudTunerIntegrationTest, self).setUp()
(self._x, self._y), (self._val_x, self._val_y) = _load_data(
self.get_temp_dir())
def testCloudTunerHyperparameters(self):
"""Test case to configure Distributing Tuner with HyperParameters."""
study_id = "{}_hyperparameters".format(_STUDY_ID_BASE)
self._study_id = study_id
tuner = DistributingCloudTuner(
_build_model,
project_id=_PROJECT_ID,
region=_REGION,
objective="acc",
hyperparameters=_HPS,
max_trials=2,
study_id=study_id,
directory=_REMOTE_DIR,
container_uri=_DOCKER_IMAGE
)
tuner.search(
x=self._x,
y=self._y,
epochs=2,
validation_data=(self._val_x, self._val_y),
)
self._assert_results_summary(tuner.results_summary)
if __name__ == "__main__":
tf.test.main()
|