| blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (list) | license_type (string) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp) | revision_date (timestamp) | committer_date (timestamp) | github_id (int64) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (string) | gha_event_created_at (timestamp) | gha_created_at (timestamp) | gha_language (string) | src_encoding (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | filename (string) | content (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
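Each record that follows fills these columns in order. As a quick orientation, here is a minimal sketch of one record materialized as a plain Python dict (field values copied from the first row below; the `content` field, which holds the full file text, is abbreviated, and the remaining columns are elided):

record = {
    "blob_id": "8a0dd434f8cd8d79756d2431dad7228d9f78abf8",
    "repo_name": "carbonblack/cbapi-python",
    "path": "/examples/threathunter/threat_intelligence/stix_parse.py",
    "detected_licenses": ["MIT", "BSD-3-Clause"],
    "license_type": "permissive",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 14872,
    "extension": "py",
    "filename": "stix_parse.py",
    "content": "...",  # full UTF-8 source text, truncated in this sketch
    # directory_id, snapshot_id, revision_id, branch_name, the *_date
    # timestamps, github_id, star/fork counts, gha_* and src_encoding
    # columns are omitted here but follow the header above
}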
| 8a0dd434f8cd8d79756d2431dad7228d9f78abf8 | ff0588bb35375440f44f19350a361028bdfd9735 | /examples/threathunter/threat_intelligence/stix_parse.py | 8663c5494e895d038fe19ea3c83bb77d2253afef | ["MIT", "BSD-3-Clause"] | permissive | carbonblack/cbapi-python | c277bcf7fceeb3dda607be08ce3f425c70d3a833 | 32dd08d2185f7113f87834002e720db31c8c910e | refs/heads/master | 2023-02-08T07:24:14.464026 | 2023-02-01T18:11:25 | 2023-02-01T18:11:25 | 52,364,316 | 158 | 123 | NOASSERTION | 2023-02-01T18:11:27 | 2016-02-23T14:21:22 | Python | UTF-8 | Python | false | false | 14,872 | py | stix_parse.py |
"""Parses STIX observables from the XML data returned by the TAXII server.
The following IOC types are extracted from STIX data:
* MD5 Hashes
* Domain Names
* IP-Addresses
* IP-Address Ranges
"""
from cybox.objects.domain_name_object import DomainName
from cybox.objects.address_object import Address
from cybox.objects.file_object import File
from cybox.objects.uri_object import URI
from lxml import etree
from io import BytesIO
from stix.core import STIXPackage
import logging
import string
import socket
import uuid
import time
import datetime
import dateutil
import dateutil.tz
import re
from cabby.constants import (
CB_STIX_XML_111, CB_CAP_11, CB_SMIME,
CB_STIX_XML_10, CB_STIX_XML_101, CB_STIX_XML_11, CB_XENC_122002)
CB_STIX_XML_12 = 'urn:stix.mitre.org:xml:1.2'
BINDING_CHOICES = [CB_STIX_XML_111, CB_CAP_11, CB_SMIME, CB_STIX_XML_12,
CB_STIX_XML_10, CB_STIX_XML_101, CB_STIX_XML_11,
CB_XENC_122002]
logger = logging.getLogger(__name__)
domain_allowed_chars = string.printable[:-6] # Used by validate_domain_name function
def validate_domain_name(domain_name):
"""Validates a domain name to ensure validity and saneness.
Args:
domain_name: Domain name string to check.
Returns:
True if checks pass, False otherwise.
"""
    if len(domain_name) > 255:
        logger.warning(
            "Excessively long domain name {} in IOC list".format(domain_name))
        return False
    if not all([c in domain_allowed_chars for c in domain_name]):
        logger.warning("Malformed domain name {} in IOC list".format(domain_name))
        return False
    parts = domain_name.split('.')
    if not parts:
        logger.warning("Empty domain name found in IOC list")
        return False
    for part in parts:
        if len(part) < 1 or len(part) > 63:
            logger.warning("Invalid label length {} in domain name {} in IOC list".format(
                part, domain_name))
            return False
return True
def validate_md5sum(md5):
"""Validates md5sum.
Args:
        md5: md5sum string to check.
Returns:
True if checks pass, False otherwise.
"""
    if len(md5) != 32:
        logger.warning("Invalid md5 length for md5 {}".format(md5))
        return False
    if not md5.isalnum():
        logger.warning("Malformed md5 {} in IOC list".format(md5))
        return False
    for c in "ghijklmnopqrstuvwxyz":
        if c in md5 or c.upper() in md5:
            logger.warning("Malformed md5 {} in IOC list".format(md5))
            return False
return True
def sanitize_id(id):
"""Removes unallowed chars from an ID.
Ids may only contain a-z, A-Z, 0-9, - and must have one character.
Args:
id: the ID to be sanitized.
Returns:
A sanitized ID.
"""
return id.replace(':', '-')
def validate_ip_address(ip_address):
"""Validates an IPv4 address."""
try:
socket.inet_aton(ip_address)
return True
except socket.error:
return False
def cybox_parse_observable(observable, indicator, timestamp, score):
"""Parses a cybox observable and returns a list containing a report dictionary.
cybox is a open standard language encoding info about cyber observables.
Args:
observable: the cybox obserable to parse.
Returns:
A report dictionary if the cybox observable has props of type:
cybox.objects.address_object.Address,
cybox.objects.file_object.File,
cybox.objects.domain_name_object.DomainName, or
cybox.objects.uri_object.URI
Otherwise it will return an empty list.
"""
reports = []
if observable.object_ and observable.object_.properties:
props = observable.object_.properties
logger.debug("{0} has props type: {1}".format(indicator, type(props)))
else:
logger.debug("{} has no props; skipping".format(indicator))
return reports
#
# sometimes the description is None
#
description = ''
if observable.description and observable.description.value:
description = str(observable.description.value)
#
# if description is an empty string, then use the indicator's description
# NOTE: This was added for RecordedFuture
#
if not description and indicator and indicator.description and indicator.description.value:
description = str(indicator.description.value)
#
# if description is still empty, use the indicator's title
#
if not description and indicator and indicator.title:
description = str(indicator.title)
#
# use the first reference as a link
# This was added for RecordedFuture
#
link = ''
if indicator and indicator.producer and indicator.producer.references:
for reference in indicator.producer.references:
link = reference
break
else:
if indicator and indicator.title:
split_title = indicator.title.split()
title_found = True
elif observable and observable.title:
split_title = observable.title.split()
title_found = True
else:
title_found = False
if title_found:
            url_pattern = re.compile(r"^(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?$")
for token in split_title:
if url_pattern.match(token):
link = token
break
#
# Sometimes the title is None, so generate a random UUID
#
if observable and observable.title:
title = observable.title
else:
title = str(uuid.uuid4())
# ID must be unique. Collisions cause 500 error on Carbon Black backend
id = str(uuid.uuid4())
if type(props) == DomainName:
# go into domainname function
reports = parse_domain_name_observable(observable, props, id, description, title, timestamp, link, score)
elif type(props) == Address:
reports = parse_address_observable(observable, props, id, description, title, timestamp, link, score)
elif type(props) == File:
reports = parse_file_observable(observable, props, id, description, title, timestamp, link, score)
elif type(props) == URI:
reports = parse_uri_observable(observable, props, id, description, title, timestamp, link, score)
else:
return reports
return reports
def parse_uri_observable(observable, props, id, description, title, timestamp, link, score):
reports = []
if props.value and props.value.value:
iocs = {'netconn_domain': []}
#
# Sometimes props.value.value is a list
#
if type(props.value.value) is list:
for domain_name in props.value.value:
if validate_domain_name(domain_name.strip()):
iocs['netconn_domain'].append(domain_name.strip())
else:
domain_name = props.value.value.strip()
if validate_domain_name(domain_name):
iocs['netconn_domain'].append(domain_name)
if len(iocs['netconn_domain']) > 0:
reports.append({'iocs_v2': iocs,
'id': sanitize_id(id),
'description': description,
'title': title,
'timestamp': timestamp,
'link': link,
'score': score})
return reports
def parse_domain_name_observable(observable, props, id, description, title, timestamp, link, score):
reports = []
if props.value and props.value.value:
iocs = {'netconn_domain': []}
#
# Sometimes props.value.value is a list
#
if type(props.value.value) is list:
for domain_name in props.value.value:
if validate_domain_name(domain_name.strip()):
iocs['netconn_domain'].append(domain_name.strip())
else:
domain_name = props.value.value.strip()
if validate_domain_name(domain_name):
iocs['netconn_domain'].append(domain_name)
if len(iocs['netconn_domain']) > 0:
reports.append({'iocs_v2': iocs,
'id': sanitize_id(id),
'description': description,
'title': title,
'timestamp': timestamp,
'link': link,
'score': score})
return reports
def parse_address_observable(observable, props, id, description, title, timestamp, link, score):
reports = []
if props.category == 'ipv4-addr' and props.address_value:
iocs = {'netconn_ipv4': []}
#
# Sometimes props.address_value.value is a list vs a string
#
if type(props.address_value.value) is list:
for ip in props.address_value.value:
if validate_ip_address(ip.strip()):
iocs['netconn_ipv4'].append(ip.strip())
else:
ipv4 = props.address_value.value.strip()
if validate_ip_address(ipv4):
iocs['netconn_ipv4'].append(ipv4)
if len(iocs['netconn_ipv4']) > 0:
reports.append({'iocs_v2': iocs,
'id': sanitize_id(observable.id_),
'description': description,
'title': title,
'timestamp': timestamp,
'link': link,
'score': score})
return reports
def parse_file_observable(observable, props, id, description, title, timestamp, link, score):
reports = []
iocs = {'hash': []}
if props.md5:
if type(props.md5) is list:
for hash in props.md5:
if validate_md5sum(hash.strip()):
iocs['hash'].append(hash.strip())
else:
if hasattr(props.md5, 'value'):
hash = props.md5.value.strip()
else:
hash = props.md5.strip()
if validate_md5sum(hash):
iocs['hash'].append(hash)
if len(iocs['hash']) > 0:
reports.append({'iocs_v2': iocs,
'id': sanitize_id(id),
'description': description,
'title': title,
'timestamp': timestamp,
'link': link,
'score': score})
return reports
def get_stix_indicator_score(indicator, default_score):
"""Returns a digit representing the indicator score.
Converts from "high", "medium", or "low" into a digit, if necessary.
"""
if not indicator.confidence:
return default_score
confidence_val_str = indicator.confidence.value.__str__()
if confidence_val_str.isdigit():
score = int(confidence_val_str)
return score
elif confidence_val_str.lower() == "high":
return 7 # 75
elif confidence_val_str.lower() == "medium":
return 5 # 50
elif confidence_val_str.lower() == "low":
return 2 # 25
else:
return default_score
def get_stix_indicator_timestamp(indicator):
timestamp = 0
if indicator.timestamp:
if indicator.timestamp.tzinfo:
timestamp = int((indicator.timestamp -
datetime.datetime(1970, 1, 1).replace(
tzinfo=dateutil.tz.tzutc())).total_seconds())
else:
timestamp = int((indicator.timestamp -
datetime.datetime(1970, 1, 1)).total_seconds())
return timestamp
def get_stix_package_timestamp(stix_package):
timestamp = 0
if not stix_package or not stix_package.timestamp:
return timestamp
try:
timestamp = stix_package.timestamp
timestamp = int(time.mktime(timestamp.timetuple()))
except (TypeError, OverflowError, ValueError) as e:
logger.warning("Problem parsing stix timestamp: {}".format(e))
return timestamp
def parse_stix_indicators(stix_package, default_score):
reports = []
if not stix_package.indicators:
return reports
for indicator in stix_package.indicators:
if not indicator or not indicator.observable:
continue
score = get_stix_indicator_score(indicator, default_score)
timestamp = get_stix_indicator_timestamp(indicator)
yield from cybox_parse_observable(
indicator.observable, indicator, timestamp, score)
def parse_stix_observables(stix_package, default_score):
reports = []
if not stix_package.observables:
return reports
timestamp = get_stix_package_timestamp(stix_package)
for observable in stix_package.observables:
if not observable:
continue
yield from cybox_parse_observable( # single element list
observable, None, timestamp, default_score)
def sanitize_stix(stix_xml):
ret_xml = b''
try:
xml_root = etree.fromstring(stix_xml)
content = xml_root.find(
'.//{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}Content')
if content is not None and len(content) == 0 and len(list(content)) == 0:
# Content has no children.
# So lets make sure we parse the xml text for content and
# re-add it as valid XML so we can parse
_content = xml_root.find(
"{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}Content_Block/{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}Content")
            if _content is not None:
new_stix_package = etree.fromstring(_content.text)
content.append(new_stix_package)
ret_xml = etree.tostring(xml_root)
except etree.ParseError as e:
logger.warning("Problem parsing stix: {}".format(e))
return ret_xml
def parse_stix(stix_xml, default_score):
reports = []
try:
stix_xml = sanitize_stix(stix_xml)
bio = BytesIO(stix_xml)
stix_package = STIXPackage.from_xml(bio)
if not stix_package:
logger.warning("Could not parse STIX xml")
return reports
if not stix_package.indicators and not stix_package.observables:
logger.info("No indicators or observables found in stix_xml")
return reports
yield from parse_stix_indicators(stix_package, default_score)
yield from parse_stix_observables(stix_package, default_score)
except etree.XMLSyntaxError as e:
logger.warning("Problem parsing stix: {}".format(e))
return reports
|
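For context, a minimal usage sketch for the module above: `parse_stix` is a generator that yields one report dict per usable indicator or observable, so callers typically drain it in a loop or into a list. The file name and score below are placeholders, not values from the original repo.

# Hypothetical driver for stix_parse.py; "sample_stix.xml" stands in for the
# raw XML bytes returned by a TAXII poll.
from stix_parse import parse_stix

with open("sample_stix.xml", "rb") as f:
    stix_xml = f.read()

for report in parse_stix(stix_xml, default_score=5):
    # each report carries 'iocs_v2', 'id', 'title', 'description',
    # 'timestamp', 'link' and 'score' keys
    print(report["title"], report["iocs_v2"])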
| 41f8193677c8862212cbfd32e56aaa1fe1b4eefd | 7bc1d8634529eac952490399fb71f10bcedf05cc | /tests/scripts/thread-cert/border_router/test_border_router_as_fed.py | ec705c582fc071878125266251df3b80fb37ebaf | ["LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause"] | permissive | openthread/openthread | 6a9e25d1cd224bde9796d9616f04f423dba27d77 | 102a631cb3f8938389d0d10199a14c59184039cd | refs/heads/main | 2023-08-18T10:46:03.820124 | 2023-08-17T22:20:55 | 2023-08-17T22:20:55 | 55,808,787 | 3,485 | 1,296 | BSD-3-Clause | 2023-09-14T15:50:53 | 2016-04-08T20:47:41 | C++ | UTF-8 | Python | false | false | 3,360 | py | test_border_router_as_fed.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies bi-directional connectivity between a Thread end device
# and an infra host via a Border Router (FED).
#
# Topology:
#    ----------------(eth)--------------------
#           |                         |
#        BR (FED)                    HOST
#           |
#        Leader
#
BR = 1
LEADER = 2
HOST = 3
class TestBorderRouterAsFed(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR: {
'name': 'BR',
'allowlist': [LEADER],
'is_otbr': True,
'version': '1.2',
'router_eligible': False,
},
LEADER: {
'name': 'LEADER',
'allowlist': [BR],
'version': '1.2',
},
HOST: {
'name': 'Host',
'is_host': True
},
}
def test(self):
br = self.nodes[BR]
leader = self.nodes[LEADER]
host = self.nodes[HOST]
host.start(start_radvd=False)
self.simulator.go(5)
leader.start()
self.simulator.go(config.LEADER_STARTUP_DELAY)
self.assertEqual('leader', leader.get_state())
br.start()
self.simulator.go(5)
self.assertEqual('child', br.get_state())
self.simulator.go(config.BORDER_ROUTER_STARTUP_DELAY)
self.assertEqual('child', br.get_state())
# Leader can ping to/from the Host on infra link.
self.assertTrue(leader.ping(host.get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0]))
self.assertTrue(host.ping(leader.get_ip6_address(config.ADDRESS_TYPE.OMR)[0], backbone=True))
self.simulator.go(5)
if __name__ == '__main__':
unittest.main()
|
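The file above is a standard `unittest` case driven by OpenThread's thread-cert simulation harness; assuming that environment is already set up, the single test method can be selected and run like any other unittest test, for example:

import unittest

# Hypothetical standalone invocation; normally the harness runs
# unittest.main() from the script itself.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "test_border_router_as_fed.TestBorderRouterAsFed.test")
unittest.TextTestRunner(verbosity=2).run(suite)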
| b35173f1ac8c8e89c1af4afacac428de6c4dbf7b | 6f797bae522927214b4c4065d88b92d6fff127e0 | /kur/model/hooks/kurhub_hook.py | 3067a74aecb245c1bd734f71caa4cf421e7c8470 | ["Apache-2.0", "Python-2.0"] | permissive | deepgram/kur | 5a3c6b5dba462327ccb134dcde53bf60ee4bf1fd | fd0c120e50815c1e5be64e5dde964dcd47234556 | refs/heads/master | 2023-08-17T11:38:47.613445 | 2020-11-04T19:09:50 | 2020-11-04T19:09:50 | 74,182,569 | 873 | 139 | Apache-2.0 | 2023-01-28T21:50:24 | 2016-11-19T02:42:09 | Python | UTF-8 | Python | false | false | 4,957 | py | kurhub_hook.py |
"""
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
import urllib.request
from . import TrainingHook, PlotHook
from ...utils import prepare_json
from tempfile import NamedTemporaryFile
logger = logging.getLogger(__name__)
###############################################################################
class KurhubHook(TrainingHook):
""" Hook for posting to Kurhub.
"""
###########################################################################
@classmethod
def get_name(cls):
""" Returns the name of the hook.
"""
return 'kurhub'
###########################################################################
def __init__(self, uuid=None, endpoint="http://dev.kurhub.com/kur_updates", *args, **kwargs):
""" Creates a new kurhub hook.
"""
super().__init__(*args, **kwargs)
self.uuid = uuid
self.endpoint = endpoint
with NamedTemporaryFile() as tmp_file:
tmp_file.close()
self.plot_name = tmp_file.name
self.plot_hook = PlotHook(self.plot_name)
if uuid is None:
raise ValueError('kurhub hook requires a "uuid" to be defined.')
###########################################################################
def send_message(self, text, info=None):
""" Sends a message to kurhub.
"""
data = {
'text': text,
'uuid': self.uuid
}
url = self.endpoint
data, header = prepare_json(data)
self._submit(url, data, header)
###########################################################################
def send_plot_message(self, text, plot_string, info=None):
""" Sends a plot message to kurhub.
"""
data = {
'text': text,
'plot': plot_string,
'uuid': self.uuid
}
url = self.endpoint
data, header = prepare_json(data)
self._submit(url, data, header)
###########################################################################
def _submit(self, url, data, header):
""" Submits a POST request to kurhub.
"""
request = urllib.request.Request(
url,
data=data,
headers=header,
method='POST'
)
director = urllib.request.build_opener()
try:
response = director.open(request)
except: # pylint: disable=bare-except
logger.exception('Failed to connect to kurhub. Make sure the URL '
'and channel are correct. If the channel was newly created, '
'it might take a little time for kurhub to catch up.')
else:
if response.code != 200:
logger.error('Failed to post kurhub notification. Make sure '
'the URL and channel are correct. If the channel was newly '
'created, it might take a little time for kurhub to catch up.')
###########################################################################
def notify(self, status, log=None, info=None):
""" Sends the kurhub message.
"""
self.plot_hook.notify(status, log, info)
# check if plot
plot_name = '{}.png'.format(self.plot_name)
if os.path.isfile(plot_name):
if os.stat(plot_name).st_size > 0:
# upload
with open(plot_name, 'rb') as plotfile:
import base64
encoded_string = base64.b64encode(plotfile.read()).decode('utf-8')
## send as post request base64
self.send_plot_message('plot created', encoded_string)
# delete after upload
os.remove(plot_name)
info = info or {}
if status is TrainingHook.EPOCH_END:
epoch = info.pop('epoch', None)
total_epochs = info.pop('total_epochs', None)
text = 'Finished epoch {} of {}.'.format(epoch, total_epochs)
elif status is TrainingHook.TRAINING_END:
text = 'Training has ended.'
elif status is TrainingHook.TRAINING_START:
text = 'Started training.'
else:
text = None
if text:
self.send_message(text, info)
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
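A minimal sketch of exercising the hook above outside of a full Kur training run; it assumes `TrainingHook.__init__` needs no required positional arguments and that the default endpoint is reachable:

# Hypothetical smoke test of KurhubHook; not part of the kur codebase.
from kur.model.hooks.kurhub_hook import KurhubHook

hook = KurhubHook(uuid="00000000-0000-0000-0000-000000000000")
# Posts {"text": ..., "uuid": ...} as JSON to the configured endpoint.
hook.send_message("training started")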
| ca9729fb7d2877a309adf7984bc75574664eefba | e7ebc88fe9c84a29e02db87a5198f3265c36273c | /hordak/tests/admin/test_admin.py | aa107de11b653305654cbe5e2adce7925ba977b3 | ["MIT"] | permissive | adamcharnock/django-hordak | de24b571c7a5f313de0387e3b1a592fc14b27ddf | 0108808204fd41e40f7f4d7dfa05ec440e1070cc | refs/heads/master | 2023-08-06T02:09:37.370711 | 2023-08-03T14:00:19 | 2023-08-03T14:31:34 | 67,291,607 | 220 | 57 | MIT | 2023-09-08T12:58:13 | 2016-09-03T13:29:39 | Python | UTF-8 | Python | false | false | 2,895 | py | test_admin.py |
from django.contrib.auth import get_user_model
from django.test.testcases import TestCase
from django.urls import reverse
from hordak.models import Account, Leg, Transaction
from hordak.tests.utils import DataProvider
class TestAdmin(DataProvider, TestCase):
def setUp(self):
self.user_account = self.account(name="User account", type="")
self.user_subaccount = self.account(
name="User account", parent=self.user_account
)
self.bank_account = self.account(
name="Bank account", is_bank_account=True, type=Account.TYPES.asset
)
self.income_account = self.account(
is_bank_account=False, type=Account.TYPES.income
)
transaction = Transaction.objects.create()
Leg.objects.create(
amount=-10, account=self.bank_account, transaction=transaction
)
Leg.objects.create(
amount=10, account=self.income_account, transaction=transaction
)
def test_account_list(self):
"""Test that accounts are listed on admin page"""
superuser = get_user_model().objects.create_superuser(username="superuser")
self.client.force_login(superuser)
url = reverse("admin:hordak_account_changelist")
res = self.client.get(url)
self.assertContains(
res,
f'<a href="/admin/hordak/account/{self.bank_account.id}/change/">Bank account</a>',
html=True,
)
self.assertContains(
res, '<td class="field-balance_sum">10.000000</td>', html=True
)
self.assertContains(res, '<td class="field-balance_sum">-</td>', html=True)
self.assertContains(res, '<td class="field-type_">-</td>', html=True)
self.assertContains(res, '<td class="field-type_">Income</td>', html=True)
self.assertContains(res, '<td class="field-type_">Asset</td>', html=True)
def test_account_edit(self):
"""Test account edit page"""
superuser = get_user_model().objects.create_superuser(username="superuser")
self.client.force_login(superuser)
url = reverse("admin:hordak_account_change", args=(self.bank_account.id,))
res = self.client.get(url)
self.assertContains(
res,
'<input type="text" name="name" value="Bank account" '
'class="vTextField" maxlength="50" required id="id_name">',
html=True,
)
def test_transaction_list(self):
"""Test that transactions are listed on admin page"""
superuser = get_user_model().objects.create_superuser(username="superuser")
self.client.force_login(superuser)
url = reverse("admin:hordak_transaction_changelist")
res = self.client.get(url)
self.assertContains(
res, '<td class="field-debited_accounts">Account 4</td>', html=True
)
|
| af7035674f65974c926fa31f7d68d814cc8b153b | f509ab9825c542e09b0c6591d86ef1f9feb540a6 | /pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/system/configure.py | 660b7b12b29479831faa03fd0707a5fa85660244 | ["Apache-2.0"] | permissive | CiscoTestAutomation/genielibs | 97f597117193aaa18028defeb69078ebb241173a | e42e51475cddcb10f5c7814d0fe892ac865742ba | refs/heads/master | 2023-08-11T16:39:41.959947 | 2023-07-27T17:58:42 | 2023-07-27T17:58:42 | 130,717,047 | 109 | 60 | Apache-2.0 | 2023-08-29T22:32:08 | 2018-04-23T15:21:56 | Python | UTF-8 | Python | false | false | 2,308 | py | configure.py |
"""Common configure functions for system"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def config_license(device, license):
""" Config license on Device
Args:
device (`obj`): Device object
license (`str`): License name
Return:
None
Raise:
        SubCommandFailure: Failed configuring license
"""
try:
device.configure("license boot level {license}".format(license=license))
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure license {license}, Error: {error}'.format(
license=license, error=e)
)
def configure_boot_level_licence(device, nw_advantage=False, nw_essentials=False,
nw_premier=False, addon=False, adventerprise=False, advipservices=False,
ipbase=False):
""" Config boot level license on Device
Args:
device ('obj'): Device object
        nw_advantage ('bool'): boot level network-advantage
        nw_essentials ('bool'): boot level network-essentials
        nw_premier ('bool'): boot level network-premier
addon ('bool'): addon option for license
adventerprise ('bool'): boot level adventerprise
advipservices ('bool'): boot level advipservices
ipbase ('bool'): boot level ipbase
Return:
None
Raise:
SubCommandFailure: Failed configuring boot level license
"""
log.info(f"Configure boot level license")
cmd = "license boot level"
if adventerprise:
cmd += " adventerprise"
elif advipservices:
cmd += " advipservices"
elif ipbase:
cmd += " ipbase"
elif nw_advantage:
cmd += " network-advantage"
if addon:
cmd += " addon dna-advantage"
elif nw_essentials:
cmd += " network-essentials"
if addon:
cmd += " addon dna-essentials"
elif nw_premier:
cmd += " network-premier"
if addon:
cmd += " addon dna-premier"
try:
device.configure(cmd)
except SubCommandFailure as e:
raise SubCommandFailure(
f"Failed to configure boot level license Error, Error:\n{e}"
)
|
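To make the flag handling above concrete, a hedged usage sketch; the import path is inferred from the file path in this row, and the device argument is assumed to be an already-connected pyATS/Genie device object:

from genie.libs.sdk.apis.iosxe.system.configure import (
    config_license, configure_boot_level_licence)

def set_dna_advantage(device):
    """Hypothetical helper; `device` is a connected pyATS/Genie device."""
    # Builds and sends "license boot level network-advantage addon dna-advantage"
    configure_boot_level_licence(device, nw_advantage=True, addon=True)

def set_network_essentials(device):
    # Sends "license boot level network-essentials"
    config_license(device, "network-essentials")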
| dad040d8ebe02312011c40cdb3ba9f7037b70a4f | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/asttokens/asttokens/line_numbers.py | aaf76cef6d040cab1e7eaa1719c03d0e720a3d85 | ["Apache-2.0"] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 2,842 | py | line_numbers.py |
# Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import re
from typing import Dict, List, Tuple
_line_start_re = re.compile(r'^', re.M)
class LineNumbers(object):
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers, as used by tokens and AST nodes.
This class expects unicode for input and stores positions in unicode. But it supports
translating to and from utf8 offsets, which are used by ast parsing.
"""
def __init__(self, text):
# type: (str) -> None
# A list of character offsets of each line's first character.
self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]
self._text = text
self._text_len = len(text)
self._utf8_offset_cache = {} # type: Dict[int, List[int]] # maps line num to list of char offset for each byte in line
def from_utf8_col(self, line, utf8_column):
# type: (int, int) -> int
"""
Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column.
"""
offsets = self._utf8_offset_cache.get(line)
if offsets is None:
end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len
line_text = self._text[self._line_offsets[line - 1] : end_offset]
offsets = [i for i,c in enumerate(line_text) for byte in c.encode('utf8')]
offsets.append(len(line_text))
self._utf8_offset_cache[line] = offsets
return offsets[max(0, min(len(offsets)-1, utf8_column))]
def line_to_offset(self, line, column):
# type: (int, int) -> int
"""
Converts 1-based line number and 0-based column to 0-based character offset into text.
"""
line -= 1
if line >= len(self._line_offsets):
return self._text_len
elif line < 0:
return 0
else:
return min(self._line_offsets[line] + max(0, column), self._text_len)
def offset_to_line(self, offset):
# type: (int) -> Tuple[int, int]
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index])
|
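A small self-contained sketch of how the class above behaves; the sample string is arbitrary, the expected values follow directly from the logic shown, and the import assumes the asttokens package is installed:

from asttokens.line_numbers import LineNumbers

# 'é' occupies two bytes in UTF-8, so utf8 column 3 on line 1 maps back to
# unicode column 2.
text = "héllo\nworld\n"
ln = LineNumbers(text)
assert ln.line_to_offset(2, 0) == 6       # first character of "world"
assert ln.offset_to_line(6) == (2, 0)
assert ln.from_utf8_col(1, 3) == 2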
| 95e57ce8a6562a24b8d6cd4916f7fffe918f1dd5 | 713b2497bc3bc256e71d59837de2c772c1d1f566 | /test/test_chaos.py | bba425571c33ed968f877c4f09b14cfe5dad6b65 | ["Apache-2.0"] | permissive | bbc/chaos-lambda | 6ef0b94980c1de03f02e0c9c56bd917bc8d8c0d3 | f00cedca76f3842256d8bc9a088dc419b0a05411 | refs/heads/master | 2022-12-05T00:04:42.158342 | 2022-11-29T15:31:51 | 2022-12-02T12:15:20 | 47,708,500 | 172 | 27 | NOASSERTION | 2022-12-02T15:39:52 | 2015-12-09T17:47:29 | Python | UTF-8 | Python | false | false | 20,116 | py | test_chaos.py |
from __future__ import print_function
import json
import re
from unittest import mock
from base import mock_imports, PatchingTestCase
mock_imports([
"boto3"
]) # noqa
import chaos
class TestGetASGTag(PatchingTestCase):
def test_finds_tag_key_case_insensitively(self):
asg = {"Tags": [{"Key": "name", "Value": "success"}]}
self.assertEqual(chaos.get_asg_tag(asg, "NAME"), "success")
self.assertEqual(chaos.get_asg_tag(asg, "name"), "success")
self.assertEqual(chaos.get_asg_tag(asg, "NaMe"), "success")
def test_returns_default_if_key_not_found(self):
asg = {"Tags": []}
self.assertEqual(chaos.get_asg_tag(asg, "blah"), None)
self.assertEqual(chaos.get_asg_tag(asg, "blah", "abc"), "abc")
def test_returns_empty_string_if_tag_has_no_value(self):
# As far as I can tell this should never happen, but just in case...
asg = {"Tags": [{"Key": "name"}]}
self.assertEqual(chaos.get_asg_tag(asg, "name"), "")
class TestSafeFloat(PatchingTestCase):
def test_returns_float_if_string_is_valid(self):
self.assertEqual(chaos.safe_float("1.0", 0.5), 1.0)
self.assertEqual(chaos.safe_float("0.0", 0.5), 0.0)
self.assertEqual(chaos.safe_float(" 1.0 ", 0.5), 1.0)
def test_returns_default_if_string_is_invalid(self):
self.assertEqual(chaos.safe_float("not a number", 0.5), 0.5)
class TestGetASGProbability(PatchingTestCase):
patch_list = (
"chaos.PROBABILITY_TAG",
"chaos.get_asg_tag",
"chaos.log"
)
def get_log_lines(self, name):
lines = []
for args, kwargs in self.log.call_args_list:
parts = re.findall(r"\[.*?\]|[^ ]+", " ".join(args))
if parts[0] == name:
lines.append(parts)
return lines
def test_returns_default_probability_if_no_tag_set(self):
self.get_asg_tag.return_value = None
p = chaos.get_asg_probability({}, mock.sentinel.default)
self.assertEqual(p, mock.sentinel.default)
def test_queries_probability_tag(self):
self.get_asg_tag.return_value = "0.1"
chaos.get_asg_probability(mock.sentinel.asg, None)
self.get_asg_tag.assert_called_once_with(
mock.sentinel.asg,
self.PROBABILITY_TAG,
mock.ANY
)
def test_returns_probability_from_tag_value_if_valid(self):
self.get_asg_tag.return_value = "0.1"
p = chaos.get_asg_probability({"AutoScalingGroupName": "x"}, None)
self.assertEqual(p, 0.1)
def test_returns_default_probability_if_tag_value_is_invalid(self):
asg = {"AutoScalingGroupName": "x"}
default = mock.sentinel.default
self.get_asg_tag.return_value = "blah"
p = chaos.get_asg_probability(asg, default)
self.assertEqual(p, default)
def test_returns_default_probability_if_tag_value_is_out_of_range(self):
asg = {"AutoScalingGroupName": "x"}
default = mock.sentinel.default
for value in ("-42", "-1.2", "1.2", "9"):
self.get_asg_tag.return_value = value
p = chaos.get_asg_probability(asg, default)
self.assertEqual(p, default)
def test_logs_parseable_error_if_tag_value_is_invalid(self):
asg = {"AutoScalingGroupName": "ASGNameHere"}
for value in ("blah", "-42", "0.1 0.2"):
self.log.reset_mock()
self.get_asg_tag.return_value = value
chaos.get_asg_probability(asg, None)
lines = self.get_log_lines("bad-probability")
self.assertEqual(set((p[1], p[3]) for p in lines), set([
("[" + value + "]", "ASGNameHere")
]))
class TestGetASGInstanceId(PatchingTestCase):
patch_list = (
"chaos.get_asg_probability",
"random.choice",
"random.random",
)
def test_returns_None_if_there_are_no_instances(self):
self.random.return_value = 1.0
asg = {"Instances": []}
self.assertEqual(chaos.get_asg_instance_id(asg, 0), None)
asg = {}
self.assertEqual(chaos.get_asg_instance_id(asg, 0), None)
def test_returns_None_if_probability_test_fails(self):
self.choice.side_effect = lambda l: l[0]
self.get_asg_probability.return_value = 0.5
self.random.return_value = 1.0
asg = {"Instances": [{"InstanceId": "i-1234abcd"}]}
default = mock.sentinel.default
self.assertEqual(chaos.get_asg_instance_id(asg, default), None)
self.get_asg_probability.assert_called_once_with(asg, default)
def test_returns_instance_id_if_probability_test_succeeds(self):
self.choice.side_effect = lambda l: l[0]
self.get_asg_probability.return_value = 0.5
self.random.return_value = 0.0
asg = {"Instances": [{"InstanceId": "i-1234abcd"}]}
default = mock.sentinel.default
self.assertEqual(chaos.get_asg_instance_id(asg, default), "i-1234abcd")
self.get_asg_probability.assert_called_once_with(asg, default)
def test_returns_random_choice_of_instance_ids(self):
self.get_asg_probability.return_value = 0.5
self.random.return_value = 0.0
instances = [
{"InstanceId": "i-00000000"},
{"InstanceId": "i-11111111"},
{"InstanceId": "i-22222222"}
]
i = chaos.get_asg_instance_id({"Instances": instances}, 0)
self.choice.assert_called_once_with(instances)
self.assertEqual(i, self.choice.return_value.get.return_value)
class TestGetAllASGs(PatchingTestCase):
def test_uses_paginator_for_describe_auto_scaling_groups(self):
autoscaling = mock.Mock()
paginator = autoscaling.get_paginator.return_value
paginator.paginate.return_value = iter([])
asgs = chaos.get_all_asgs(autoscaling)
list(asgs) # force evaluation of the generator
autoscaling.get_paginator.assert_called_once_with(
"describe_auto_scaling_groups"
)
paginator.paginate.assert_called_once_with()
def test_yields_asgs_from_each_response(self):
autoscaling = mock.Mock()
paginator = autoscaling.get_paginator.return_value
paginator.paginate.return_value = iter([
{"AutoScalingGroups": [mock.sentinel.one, mock.sentinel.two]},
{"AutoScalingGroups": [mock.sentinel.three]},
{"AutoScalingGroups": [mock.sentinel.four, mock.sentinel.five]}
])
asgs = chaos.get_all_asgs(autoscaling)
self.assertEqual(set(asgs), set([
mock.sentinel.one,
mock.sentinel.two,
mock.sentinel.three,
mock.sentinel.four,
mock.sentinel.five
]))
def test_ignores_responses_with_missing_AutoScalingGroups_key(self):
autoscaling = mock.Mock()
paginator = autoscaling.get_paginator.return_value
paginator.paginate.return_value = iter([
{"AutoScalingGroups": [mock.sentinel.one]},
{},
{"AutoScalingGroups": [mock.sentinel.two]}
])
asgs = chaos.get_all_asgs(autoscaling)
self.assertEqual(set(asgs), set([
mock.sentinel.one,
mock.sentinel.two
]))
class TestGetTargets(PatchingTestCase):
patch_list = (
"chaos.get_all_asgs",
"chaos.get_asg_instance_id",
)
def test_requests_all_auto_scaling_groups(self):
autoscaling = mock.Mock()
self.get_all_asgs.return_value = iter([])
chaos.get_targets(autoscaling, 0)
self.get_all_asgs.assert_called_once_with(autoscaling)
def test_returns_empty_list_if_no_auto_scaling_groups(self):
autoscaling = mock.Mock()
self.get_all_asgs.return_value = iter([])
self.assertEqual(chaos.get_targets(autoscaling, 0), [])
def test_passes_default_probability_to_get_asg_instance_id(self):
autoscaling = mock.Mock()
asg = {"AutoScalingGroupName": "a", "Instances": ["i-11111111"]}
default = mock.sentinel.default_probablity
self.get_asg_instance_id.return_value = None
self.get_all_asgs.return_value = iter([asg])
chaos.get_targets(autoscaling, default)
self.get_asg_instance_id.assert_called_once_with(asg, default)
def test_gets_instance_from_each_asg(self):
autoscaling = mock.Mock()
self.get_asg_instance_id.side_effect = lambda asg, default: \
asg["Instances"][0]
self.get_all_asgs.return_value = iter([
{"AutoScalingGroupName": "a", "Instances": ["i-11111111"]},
{"AutoScalingGroupName": "b", "Instances": ["i-22222222"]},
{"AutoScalingGroupName": "c", "Instances": ["i-33333333"]}
])
targets = chaos.get_targets(autoscaling, 0)
self.assertEqual(set(targets), set([
("a", "i-11111111"),
("b", "i-22222222"),
("c", "i-33333333")
]))
def test_ignores_asgs_with_no_instances(self):
autoscaling = mock.Mock()
self.get_asg_instance_id.side_effect = lambda asg, default: \
asg["Instances"][0] if len(asg["Instances"]) != 0 else None
self.get_all_asgs.return_value = iter([
{"AutoScalingGroupName": "a", "Instances": []},
{"AutoScalingGroupName": "b", "Instances": ["i-22222222"]},
{"AutoScalingGroupName": "c", "Instances": []}
])
targets = chaos.get_targets(autoscaling, 0)
self.assertEqual(targets, [("b", "i-22222222")])
class TestTerminateTargets(PatchingTestCase):
patch_list = (
"chaos.log",
"chaos.os"
)
def get_log_lines(self, name):
lines = []
for args, kwargs in self.log.call_args_list:
parts = re.findall(r"\[.*?\]|[^ ]+", " ".join(args))
if parts[0] == name:
lines.append(parts)
return lines
def test_terminates_target_instances(self):
ec2 = mock.Mock()
sns = mock.Mock()
ec2.terminate_instances.return_value = {}
chaos.terminate_targets(ec2, sns, [
("a", "i-11111111"),
("b", "i-22222222")
])
ec2.terminate_instances.assert_called_once_with(
InstanceIds=["i-11111111", "i-22222222"]
)
def test_parseable_log_line_for_each_targeted_instance(self):
ec2 = mock.Mock()
sns = mock.Mock()
ec2.terminate_instances.return_value = {}
chaos.terminate_targets(ec2, sns, [
("asg-name-one", "i-00000000"),
("second-asg", "i-11111111"),
("the-third-asg", "i-22222222")
])
logged = self.get_log_lines("targeting")
self.assertEqual(set((part[1], part[3]) for part in logged), set([
("i-00000000", "asg-name-one"),
("i-11111111", "second-asg"),
("i-22222222", "the-third-asg")
]))
def test_parseable_log_line_for_each_termination_result(self):
ec2 = mock.Mock()
sns = mock.Mock()
# We're cheating here and returning results that are unrelated to the
# list passed to terminate_targets
ec2.terminate_instances.return_value = {
"TerminatingInstances": [
{"InstanceId": "i-00000000", "CurrentState": {"Name": "s1"}},
{"InstanceId": "i-11111111", "CurrentState": {"Name": "s2"}},
{"InstanceId": "i-22222222", "CurrentState": {"Name": "s3"}}
]
}
chaos.terminate_targets(ec2, sns, [("a", "i-11111111")])
logged = self.get_log_lines("result")
self.assertEqual(set((part[1], part[3]) for part in logged), set([
("i-00000000", "s1"),
("i-11111111", "s2"),
("i-22222222", "s3")
]))
def test_returns_termination_results(self):
ec2 = mock.Mock()
sns = mock.Mock()
# We're cheating here and returning results that are unrelated to the
# list passed to terminate_targets
ec2.terminate_instances.return_value = {
"TerminatingInstances": [
{"InstanceId": "i-00000000", "CurrentState": {"Name": "s1"}},
{"InstanceId": "i-11111111", "CurrentState": {"Name": "s2"}},
{"InstanceId": "i-22222222", "CurrentState": {"Name": "s3"}}
]
}
results = chaos.terminate_targets(ec2, sns, [])
self.assertEqual(set(results), set([
("i-00000000", "s1"),
("i-11111111", "s2"),
("i-22222222", "s3")
]))
def test_sends_notification_per_instance(self):
self.os.environ.get.return_value = "MyTestTopic"
ec2 = mock.Mock()
sns = mock.Mock()
ec2.terminate_instances.return_value = {
"TerminatingInstances": []
}
chaos.terminate_targets(ec2, sns, [("a1", "i1"), ("a2", "i2")])
sns.publish.assert_any_call(
TopicArn="MyTestTopic",
Message=MatchJson({
"event_name": "chaos_lambda.terminating",
"asg_name": "a1",
"instance_id": "i1"
})
)
sns.publish.assert_any_call(
TopicArn="MyTestTopic",
Message=MatchJson({
"event_name": "chaos_lambda.terminating",
"asg_name": "a2",
"instance_id": "i2"
})
)
self.assertEqual(2, sns.publish.call_count)
def test_handles_sns_exception(self):
self.os.environ.get.return_value = "MyTestTopic"
ec2 = mock.Mock()
sns = mock.Mock()
ec2.terminate_instances.return_value = {}
sns.publish.side_effect = Exception("boom")
chaos.terminate_targets(ec2, sns, [
("a", "i-11111111"),
("b", "i-22222222")
])
ec2.terminate_instances.assert_called_once_with(
InstanceIds=["i-11111111", "i-22222222"]
)
class MatchJson:
'''
A JSON Matcher that takes a Dictionary as input, checking that those
specified keys and values exist in the JSON string that is supplied. It
does not check if there are any other keys in the JSON string.
'''
def __init__(self, expected):
self.expected = expected
def __repr__(self):
return "'" + json.dumps(self.expected) + "'"
def __eq__(self, json_string):
try:
parsed_json = json.loads(json_string)
for key in self.expected.keys():
try:
if self.expected[key] != parsed_json[key]:
return False
except KeyError:
print("The key '" + key + "' does not exist")
return False
except ValueError:
print("Message passed to sns.publish was not valid JSON")
return False
return True
class TestChaosLambda(PatchingTestCase):
patch_list = (
"chaos.boto3",
"chaos.get_targets",
"chaos.log",
"chaos.terminate_targets",
)
def setUp(self):
super(TestChaosLambda, self).setUp()
self.clients = {}
self.boto3.client.side_effect = self.make_client
def make_client(self, name, region_name):
c = self.clients.get(name, None)
if c is not None:
self.assertEqual(c.region_name, region_name)
else:
c = self.clients[name] = mock.Mock(region_name=region_name)
return c
def test_parseable_log_line_for_trigger(self):
self.get_targets.return_value = []
chaos.chaos_lambda(["sp-moonbase-1"], 0)
self.log.assert_called_once_with("triggered", "sp-moonbase-1")
def test_does_nothing_if_no_targets(self):
self.get_targets.return_value = []
chaos.chaos_lambda(["sp-moonbase-1"], 0)
self.assertEqual(self.terminate_targets.call_count, 0)
def test_uses_autoscaling_service_in_correct_region(self):
self.get_targets.return_value = []
chaos.chaos_lambda(["sp-moonbase-1"], 0)
autoscaling = self.get_targets.call_args[0][0]
self.assertEqual(autoscaling, self.clients["autoscaling"])
self.assertEqual(autoscaling.region_name, "sp-moonbase-1")
def test_passes_default_probability_to_get_targets(self):
default = mock.sentinel.default
self.get_targets.return_value = []
chaos.chaos_lambda(["sp-moonbase-1"], default)
self.assertEqual(self.get_targets.call_args[0][1], default)
def test_terminates_target_instances_in_correct_region(self):
targets = [("a", "i-11111111"), ("b", "i-22222222")]
self.get_targets.return_value = targets
ec2 = self.make_client("ec2", region_name="sp-moonbase-1")
sns = self.make_client("sns", region_name="sp-moonbase-1")
chaos.chaos_lambda(["sp-moonbase-1"], 0)
# Above triggers self.make_client, which checks the region name
self.terminate_targets.assert_called_once_with(ec2, sns, targets)
class TestGetRegions(PatchingTestCase):
patch_list = (
"chaos.os",
)
def test_looks_for_a_regions_environment_variable(self):
self.os.environ.get.return_value = ""
context = mock.Mock()
context.invoked_function_arn = "arn:aws:lambda:re-gion-1:..."
chaos.get_regions(context)
self.os.environ.get.assert_called_once_with("regions", "")
def test_extracts_region_from_context_if_no_regions_variable(self):
self.os.environ.get.return_value = ""
context = mock.Mock()
for region in ("eu-west-1", "sp-moonbase-1"):
context.invoked_function_arn = "arn:aws:lambda:" + region + ":..."
result = chaos.get_regions(context)
self.assertEqual(result, [region])
def test_reads_from_comma_separated_regions_variable_if_set(self):
self.os.environ.get.return_value = "re-gion-1,sp-moonbase-1"
result = chaos.get_regions(mock.Mock())
self.assertEqual(result, ["re-gion-1", "sp-moonbase-1"])
def test_ignores_whitespace_in_regions_variable(self):
self.os.environ.get.return_value = "\n sp-moonbase-1\n, re-gion-1 "
result = chaos.get_regions(mock.Mock())
self.assertEqual(result, ["sp-moonbase-1", "re-gion-1"])
class TestGetDefaultProbability(PatchingTestCase):
patch_list = (
"chaos.os",
)
def test_looks_for_a_probability_environment_variable(self):
self.os.environ.get.return_value = ""
chaos.get_default_probability()
self.os.environ.get.assert_called_once_with("probability", "")
def test_returns_default_if_no_probability_variable(self):
self.os.environ.get.return_value = ""
p = chaos.get_default_probability()
self.assertEqual(p, chaos.DEFAULT_PROBABILITY)
def test_returns_float_value_of_probability_variable(self):
for s in ("0.1", "0.2", "0.3"):
self.os.environ.get.return_value = s
p = chaos.get_default_probability()
self.assertEqual(p, float(s))
def test_ignores_whitespace_in_probability_variable(self):
self.os.environ.get.return_value = " \n0.1 "
p = chaos.get_default_probability()
self.assertEqual(p, 0.1)
class TestHandler(PatchingTestCase):
patch_list = (
"chaos.chaos_lambda",
"chaos.get_default_probability",
"chaos.get_regions",
)
def test_passes_along_the_region_list(self):
context = mock.sentinel.context
chaos.handler(None, context)
self.get_regions.assert_called_once_with(context)
self.chaos_lambda.assert_called_once_with(
self.get_regions.return_value,
mock.ANY
)
def test_passes_along_the_default_probability(self):
chaos.handler(None, mock.Mock())
self.get_default_probability.assert_called_once_with()
self.chaos_lambda.assert_called_once_with(
mock.ANY,
self.get_default_probability.return_value
)
|
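To make the custom matcher used in the SNS assertions above concrete, a short sketch of `MatchJson` equality; it treats the expected dict as a subset of the parsed JSON and ignores extra keys (the import assumes the test module is importable inside its harness):

from test_chaos import MatchJson  # the class defined above

msg = '{"event_name": "chaos_lambda.terminating", "asg_name": "a1", "instance_id": "i1"}'
assert MatchJson({"event_name": "chaos_lambda.terminating"}) == msg
assert not (MatchJson({"instance_id": "i2"}) == msg)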
| df0b7a19d64cf774a1733a9f6a758ea561779dd9 | 6a531e292af43d3e7aec6d3019e0333362afe454 | /src/cactus/refmap/cactus_graphmap.py | e842daf665e2cbbaad16604883cc17d848185faa | ["MIT"] | permissive | ComparativeGenomicsToolkit/cactus | 2d4d891b6b3af0a22b64aba11f18a867a7666e58 | 41d99360cfa79ada5bdca307883c30c7fb59a06d | refs/heads/master | 2023-09-01T20:38:37.550816 | 2023-09-01T14:47:06 | 2023-09-01T14:47:06 | 1,317,650 | 369 | 106 | NOASSERTION | 2023-09-05T20:27:44 | 2011-02-01T20:17:33 | C | UTF-8 | Python | false | false | 28,627 | py | cactus_graphmap.py |
#!/usr/bin/env python3
"""jobs and command for running the minigraph pan-genome building pipeline, which takes
input: usual list of fasta files + a minigraph-compatible GFA graph
output: PAF file containing alignment of each input Fasta to the contig sequences of the graph
(these contig sequences are also output in their own Fasta, which can be treated as an "assembly"
by the rest of cactus)
"""
import os, sys
from argparse import ArgumentParser
import xml.etree.ElementTree as ET
import copy
import timeit
from operator import itemgetter
from cactus.progressive.seqFile import SeqFile
from cactus.shared.common import setupBinaries, importSingularityImage
from cactus.shared.common import cactusRootPath
from cactus.shared.configWrapper import ConfigWrapper
from cactus.shared.common import makeURL, catFiles
from cactus.shared.common import enableDumpStack
from cactus.shared.common import cactus_override_toil_options
from cactus.shared.common import cactus_call
from cactus.shared.common import getOptionalAttrib, findRequiredNode
from cactus.shared.common import unzip_gz, zip_gz
from cactus.shared.version import cactus_commit
from cactus.preprocessor.checkUniqueHeaders import sanitize_fasta_headers
from toil.job import Job
from toil.common import Toil
from toil.statsAndLogging import logger
from toil.statsAndLogging import set_logging_from_options
from toil.realtimeLogger import RealtimeLogger
from cactus.shared.common import cactus_cpu_count
from cactus.progressive.progressive_decomposition import compute_outgroups, parse_seqfile, get_subtree, get_spanning_subtree, get_event_set
from cactus.refmap.cactus_minigraph import check_sample_names
from sonLib.nxnewick import NXNewick
from sonLib.bioio import getTempDirectory, getTempFile
def main():
parser = ArgumentParser()
Job.Runner.addToilOptions(parser)
parser.add_argument("seqFile", help = "Seq file (will be modified if necessary to include graph Fasta sequence)")
parser.add_argument("minigraphGFA", help = "Minigraph-compatible reference graph in GFA format (can be gzipped)")
parser.add_argument("outputPAF", type=str, help = "Output pairwise alignment file in PAF format")
parser.add_argument("--outputFasta", type=str, help = "Output graph sequence file in FASTA format (required if not present in seqFile)")
parser.add_argument("--maskFilter", type=int, help = "Ignore softmasked sequence intervals > Nbp (overrides config option of same name)")
parser.add_argument("--delFilter", type=int, help = "Filter out split-mapping-implied deletions > Nbp (default will be \"delFilter\" from the config")
parser.add_argument("--outputGAFDir", type=str, help = "Output GAF alignments (raw minigraph output before PAF conversion) to this directory")
parser.add_argument("--reference", nargs='+', type=str, help = "Reference genome name. MAPQ filter will not be applied to it")
parser.add_argument("--refFromGFA", action="store_true", help = "Do not align reference (--reference) from seqfile, and instead extract its alignment from the rGFA tags (must have been used as reference for minigraph GFA construction)")
parser.add_argument("--mapCores", type=int, help = "Number of cores for minigraph. Overrides graphmap cpu in configuration")
#WDL hacks
parser.add_argument("--pathOverrides", nargs="*", help="paths (multiple allowed) to override from seqFile")
parser.add_argument("--pathOverrideNames", nargs="*", help="names (must be same number as --pathOverrides) of path overrides")
#Progressive Cactus Options
parser.add_argument("--configFile", dest="configFile",
help="Specify cactus configuration file",
default=os.path.join(cactusRootPath(), "cactus_progressive_config.xml"))
parser.add_argument("--latest", dest="latest", action="store_true",
help="Use the latest version of the docker container "
"rather than pulling one matching this version of cactus")
parser.add_argument("--containerImage", dest="containerImage", default=None,
help="Use the the specified pre-built containter image "
"rather than pulling one from quay.io")
parser.add_argument("--binariesMode", choices=["docker", "local", "singularity"],
help="The way to run the Cactus binaries", default=None)
options = parser.parse_args()
setupBinaries(options)
set_logging_from_options(options)
enableDumpStack()
if options.outputGAFDir:
if not os.path.isdir(options.outputGAFDir):
os.makedirs(options.outputGAFDir)
if (options.pathOverrides or options.pathOverrideNames):
if not options.pathOverrides or not options.pathOverrideNames or \
len(options.pathOverrideNames) != len(options.pathOverrides):
raise RuntimeError('same number of values must be passed to --pathOverrides and --pathOverrideNames')
# support but ignore multi reference
if options.reference:
options.reference = options.reference[0]
# Mess with some toil options to create useful defaults.
cactus_override_toil_options(options)
logger.info('Cactus Command: {}'.format(' '.join(sys.argv)))
logger.info('Cactus Commit: {}'.format(cactus_commit))
start_time = timeit.default_timer()
graph_map(options)
end_time = timeit.default_timer()
run_time = end_time - start_time
logger.info("cactus-graphmap has finished after {} seconds".format(run_time))
def graph_map(options):
with Toil(options) as toil:
importSingularityImage(options)
#Run the workflow
config_node = ET.parse(options.configFile).getroot()
config_wrapper = ConfigWrapper(config_node)
graph_event = getOptionalAttrib(findRequiredNode(config_node, "graphmap"), "assemblyName", default="_MINIGRAPH_")
if options.restart:
paf_id, gfa_fa_id, gaf_id, unfiltered_paf_id, paf_filter_log = toil.restart()
else:
# load up the seqfile and figure out the outgroups and schedule
config_wrapper.substituteAllPredefinedConstantsWithLiterals()
mc_tree, input_seq_map, og_candidates = parse_seqfile(options.seqFile, config_wrapper, pangenome=True)
og_map = compute_outgroups(mc_tree, config_wrapper, set(og_candidates))
event_set = get_event_set(mc_tree, config_wrapper, og_map, mc_tree.getRootName())
# apply path overrides. this was necessary for wdl which doesn't take kindly to
# text files of local paths (ie seqfile). one way to fix would be to add support
# for s3 paths and force wdl to use it. a better way would be a more fundamental
# interface shift away from files of paths throughout all of cactus
if options.pathOverrides:
for name, override in zip(options.pathOverrideNames, options.pathOverrides):
input_seq_map[name] = override
#apply the maskfilter override
if options.maskFilter is not None:
findRequiredNode(config_node, "graphmap").attrib["maskFilter"] = str(options.maskFilter)
if options.delFilter is not None:
findRequiredNode(config_node, "graphmap").attrib["delFilter"] = str(options.delFilter)
# apply cpu override
if options.mapCores is not None:
findRequiredNode(config_node, "graphmap").attrib["cpu"] = str(options.mapCores)
mg_cores = getOptionalAttrib(findRequiredNode(config_node, "graphmap"), "cpu", typeFn=int, default=1)
            if options.batchSystem.lower() in ['single_machine', 'singlemachine']:
mg_cores = min(mg_cores, cactus_cpu_count(), int(options.maxCores) if options.maxCores else sys.maxsize)
findRequiredNode(config_node, "graphmap").attrib["cpu"] = str(mg_cores)
            # get the minigraph "virtual" assembly name
graph_event = getOptionalAttrib(findRequiredNode(config_node, "graphmap"), "assemblyName", default="_MINIGRAPH_")
if graph_event in event_set:
# dont need to import this
event_set.remove(graph_event)
# validate the sample names
check_sample_names(input_seq_map.keys(), options.reference)
# check --reference input (a bit redundant to above, but does additional leaf check)
if options.reference:
leaves = [mc_tree.getName(leaf) for leaf in mc_tree.getLeaves()]
if options.reference not in leaves:
raise RuntimeError("Genome specified with --reference, {}, not found in tree leaves".format(options.reference))
if options.refFromGFA:
if not options.reference:
raise RuntimeError("--reference must be used with --refFromGFA")
# ugly, but this option used to be a string
# todo: probably best to eventually get rid of this option entirely.
options.refFromGFA = options.reference
# we're not going to need the fasta for anything, so forget about it now
del input_seq_map[options.refFromGFA]
if not options.outputFasta and graph_event not in input_seq_map:
raise RuntimeError("{} assembly not found in seqfile so it must be specified with --outputFasta".format(graph_event))
#import the graph
logger.info("Importing {}".format(options.minigraphGFA))
gfa_id = toil.importFile(makeURL(options.minigraphGFA))
#import the sequences (that we need to align for the given event, ie leaves and outgroups)
seq_id_map = {}
fa_id_map = {}
for (genome, seq) in input_seq_map.items():
if genome in event_set:
if os.path.isdir(seq):
tmpSeq = getTempFile()
catFiles([os.path.join(seq, subSeq) for subSeq in os.listdir(seq)], tmpSeq)
seq = tmpSeq
seq = makeURL(seq)
logger.info("Importing {}".format(seq))
seq_id_map[genome] = toil.importFile(seq)
fa_id_map[genome] = seq
# run the workflow
paf_id, gfa_fa_id, gaf_id, unfiltered_paf_id, paf_filter_log, paf_was_filtered = toil.start(Job.wrapJobFn(
minigraph_workflow, options, config_wrapper, seq_id_map, gfa_id, graph_event))
#export the paf
toil.exportFile(paf_id, makeURL(options.outputPAF))
output_gaf = options.outputPAF[:-4] if options.outputPAF.endswith('.paf') else options.outputPAF
output_gaf += '.gaf.gz'
toil.exportFile(gaf_id, makeURL(output_gaf))
if paf_was_filtered:
toil.exportFile(unfiltered_paf_id, makeURL(options.outputPAF + ".unfiltered.gz"))
toil.exportFile(paf_filter_log, makeURL(options.outputPAF + ".filter.log"))
if gfa_fa_id:
toil.exportFile(gfa_fa_id, makeURL(options.outputFasta))
# update the input seqfile (in place!)
if options.outputFasta:
add_genome_to_seqfile(options.seqFile, makeURL(options.outputFasta), graph_event)
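# Note on the seqFile / --pathOverrides handling above: the seqFile is a plain-text table of
# "<genome-name> <path-or-URL>" rows (optionally preceded by a newick tree for non-pangenome runs),
# and --pathOverrides simply swaps out the path column for the named genomes before import.
# A purely illustrative example (sample names and paths are made up):
#
#   HG002    /data/assemblies/HG002.fa.gz
#   HG00733  s3://my-bucket/HG00733.fa.gz
#   CHM13    https://example.org/chm13.fa.gz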
def minigraph_workflow(job, options, config, seq_id_map, gfa_id, graph_event, sanitize=True):
""" Overall workflow takes command line options and returns (paf-id, (optional) fa-id) """
fa_id = None
gfa_id_size = gfa_id.size
# can be a list coming in from cactus-pangenome, but we only need first item
if type(options.reference) is list:
options.reference = options.reference[0]
root_job = Job()
job.addChild(root_job)
mg_cores = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "cpu", typeFn=int, default=1)
# enforce unique prefixes and unzip fastas
if sanitize:
sanitize_job = root_job.addChildJobFn(sanitize_fasta_headers, seq_id_map, pangenome=True)
seq_id_map = sanitize_job.rv()
zipped_gfa = options.minigraphGFA.endswith('.gz')
if options.outputFasta:
# convert GFA to fasta
scale = 5 if zipped_gfa else 1
fa_job = root_job.addChildJobFn(make_minigraph_fasta, gfa_id, options.outputFasta, graph_event,
disk=scale*2*gfa_id.size, memory=2*scale*gfa_id.size)
fa_id = fa_job.rv()
if zipped_gfa:
# gaf2paf needs unzipped gfa, so we take care of that upfront
gfa_unzip_job = root_job.addChildJobFn(unzip_gz, options.minigraphGFA, gfa_id, disk=5*gfa_id.size)
gfa_id = gfa_unzip_job.rv()
gfa_id_size *= 10
options.minigraphGFA = options.minigraphGFA[:-3]
paf_job = Job.wrapJobFn(minigraph_map_all, config, gfa_id, seq_id_map, graph_event)
root_job.addFollowOn(paf_job)
if options.reference:
# extract a PAF directly from the rGFAs tag for the given reference
# if --refFromGFA is specified, we get the entire alignment from that, otherwise we just take contigs
# that didn't get mapped by anything else
gfa2paf_job = Job.wrapJobFn(extract_paf_from_gfa, gfa_id, options.minigraphGFA, options.reference, graph_event, paf_job.rv(0) if not options.refFromGFA else None,
disk=gfa_id_size, memory=gfa_id_size)
if options.refFromGFA:
root_job.addChild(gfa2paf_job)
else:
paf_job.addFollowOn(gfa2paf_job)
merge_paf_job = Job.wrapJobFn(merge_pafs, {"1" : paf_job.rv(0), "2" : gfa2paf_job.rv()}, disk=gfa_id_size)
paf_job.addFollowOn(merge_paf_job)
gfa2paf_job.addFollowOn(merge_paf_job)
out_paf_id = merge_paf_job.rv()
prev_job = merge_paf_job
else:
out_paf_id = paf_job.rv(0)
prev_job = paf_job
# apply the optional deletion filter
unfiltered_paf_id = None
filtered_paf_log = None
del_filter = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "delFilter", int, default=-1)
if del_filter > 0:
del_filter_threshold = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "delFilterThreshold", float, default=None)
del_size_threshold = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "delFilterQuerySizeThreshold", float, default=None)
del_filter_job = prev_job.addFollowOnJobFn(filter_paf_deletions, out_paf_id, gfa_id, del_filter, del_filter_threshold,
del_size_threshold,
disk=gfa_id_size, cores=mg_cores,
memory=6*gfa_id_size)
unfiltered_paf_id = prev_job.addFollowOnJobFn(zip_gz, 'mg.paf.unfiltered', out_paf_id, disk=gfa_id_size).rv()
out_paf_id = del_filter_job.rv(0)
filtered_paf_log = del_filter_job.rv(1)
paf_was_filtered = del_filter_job.rv(2)
return out_paf_id, fa_id if options.outputFasta else None, paf_job.rv(1), unfiltered_paf_id, filtered_paf_log, paf_was_filtered
def make_minigraph_fasta(job, gfa_file_id, gfa_file_path, name):
""" Use gfatools to make the minigraph "assembly" """
# note: using the toil-vg convention of naming working files manually so that logging is more readable
work_dir = job.fileStore.getLocalTempDir()
gfa_path = os.path.join(work_dir, "mg.gfa")
fa_path = os.path.join(work_dir, "minigraph_sequences.fa")
job.fileStore.readGlobalFile(gfa_file_id, gfa_path)
cmd = [["gfatools", "gfa2fa", gfa_path]]
if name:
cmd.append(["sed", "-e", "s/^>\(.\)/>id={}|\\1/g".format(name)])
if gfa_file_path.endswith('.gz'):
cmd.append(['bgzip', '--threads', str(job.cores)])
fa_path += '.gz'
if len(cmd) == 1:
cmd = cmd[0]
cactus_call(outfile=fa_path, parameters=cmd)
return job.fileStore.writeGlobalFile(fa_path)
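# For reference, the command list assembled above corresponds roughly to a shell pipeline like the
# following (paths and the "_MINIGRAPH_" prefix are illustrative only; bgzip is only appended when
# the input GFA was gzipped):
#
#   gfatools gfa2fa mg.gfa | sed -e 's/^>\(.\)/>id=_MINIGRAPH_|\1/g' | bgzip > minigraph_sequences.fa.gz
#
# i.e. every GFA segment becomes a FASTA record whose header is prefixed with "id=<assemblyName>|",
# matching the event-prefix convention used for the other genomes.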
def minigraph_map_all(job, config, gfa_id, fa_id_map, graph_event):
""" top-level job to run the minigraph mapping in parallel, returns paf """
# hang everything on this job, to self-contain workflow
top_job = Job()
job.addChild(top_job)
mg_cores = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "cpu", typeFn=int, default=1)
# doing the paf conversion is more efficient when done separately for each genome. we can get away
# with doing this if the universal filter (which needs to process everything at once) is disabled
xml_node = findRequiredNode(config.xmlRoot, "graphmap")
paf_per_genome = not getOptionalAttrib(xml_node, "universalMZFilter", float)
# do the mapping
gaf_id_map = {}
paf_id_map = {}
for event, fa_id in fa_id_map.items():
minigraph_map_job = top_job.addChildJobFn(minigraph_map_one, config, event, fa_id, gfa_id,
# todo: estimate RAM
cores=mg_cores, disk=5*fa_id.size + gfa_id.size,
memory=72*fa_id.size + 2*gfa_id.size)
gaf_id_map[event] = minigraph_map_job.rv(0)
paf_id_map[event] = minigraph_map_job.rv(1)
# merge up
paf_merge_job = top_job.addFollowOnJobFn(merge_pafs, paf_id_map)
gaf_merge_job = top_job.addFollowOnJobFn(merge_pafs, gaf_id_map, gzip=True)
return paf_merge_job.rv(), gaf_merge_job.rv()
def minigraph_map_one(job, config, event_name, fa_file_id, gfa_file_id):
""" Run minigraph to map a Fasta file to a GFA graph, producing a GAF output """
work_dir = job.fileStore.getLocalTempDir()
gfa_path = os.path.join(work_dir, "mg.gfa")
fa_path = os.path.join(work_dir, "{}.fa".format(event_name))
if fa_path == gfa_path or fa_path == gfa_path + ".gz":
gfa_path += ".1"
gaf_path = os.path.join(work_dir, "{}.gaf".format(event_name))
job.fileStore.readGlobalFile(gfa_file_id, gfa_path)
job.fileStore.readGlobalFile(fa_file_id, fa_path)
# parse options from the config
xml_node = findRequiredNode(config.xmlRoot, "graphmap")
minigraph_opts = getOptionalAttrib(xml_node, "minigraphMapOptions", str, default="")
opts_list = minigraph_opts.split()
# add required options if not present
if "-c" not in opts_list:
opts_list += ["-c"]
if "-t" not in opts_list:
opts_list += ["-t", str(int(job.cores))]
cmd = []
# optional hardmasking of softmasked fasta input (to ignore masked sequence)
mask_filter = getOptionalAttrib(xml_node, "maskFilter", int, default=-1)
if mask_filter >= 0:
cmd += [['cactus_softmask2hardmask', fa_path, '-m', str(mask_filter)]]
fa_path = '-'
# run minigraph mapping
cmd += [["minigraph", gfa_path, fa_path, "-o", gaf_path] + opts_list]
cactus_call(parameters=cmd, job_memory=job.memory)
# convert the gaf into unstable gaf (targets are node sequences)
# note: the gfa needs to be uncompressed for this tool to work
mg_lengths_path = gfa_path + '.node_lengths.tsv'
unstable_gaf_path = gaf_path + '.unstable'
cmd = ['gaf2unstable', gaf_path, '-g', gfa_path, '-o', mg_lengths_path]
# optional gaf overlap filter
overlap_ratio = getOptionalAttrib(xml_node, "GAFOverlapFilterRatio", typeFn=float, default=0)
length_ratio = getOptionalAttrib(xml_node, "GAFOverlapFilterMinLengthRatio", typeFn=float, default=0)
overlap_filter_len = getOptionalAttrib(xml_node, "minGAFQueryOverlapFilter", int, default=0)
min_block = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minGAFBlockLength", typeFn=int, default=0)
min_mapq = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minMAPQ", typeFn=int, default=0)
min_ident = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minIdentity", typeFn=float, default=0)
if overlap_ratio or overlap_filter_len:
cmd = [cmd, ['gaffilter', '-', '-r', str(overlap_ratio), '-m', str(length_ratio), '-q', str(min_mapq),
'-b', str(min_block), '-o', str(overlap_filter_len), '-i', str(min_ident)]]
cactus_call(parameters=cmd, outfile=unstable_gaf_path, job_memory=job.memory)
# convert the unstable gaf into unstable paf, which is what cactus expects
# also tack on the unique id to the target column
graph_event = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "assemblyName", default="_MINIGRAPH_")
unstable_paf_path = unstable_gaf_path + '.paf'
unstable_paf_cmd = [['gaf2paf', unstable_gaf_path, '-l', mg_lengths_path],
                        ['awk', 'BEGIN{{OFS="\\t"}} {{$6="id={}|"$6; print}}'.format(graph_event)]]
cactus_call(parameters=unstable_paf_cmd, outfile=unstable_paf_path, job_memory=job.memory)
# return the stable gaf (minigraph output) and the unstable paf
return job.fileStore.writeGlobalFile(gaf_path), job.fileStore.writeGlobalFile(unstable_paf_path)
def merge_pafs(job, paf_file_id_map, gzip=False):
""" merge up some pafs """
paf_paths = [job.fileStore.readGlobalFile(paf_id) for paf_id in paf_file_id_map.values()]
merged_path = job.fileStore.getLocalTempFile()
catFiles(paf_paths, merged_path)
if gzip:
cactus_call(parameters=['bgzip', merged_path, '--threads', str(job.cores)])
merged_path += '.gz'
return job.fileStore.writeGlobalFile(merged_path)
def extract_paf_from_gfa(job, gfa_id, gfa_path, ref_event, graph_event, ignore_paf_id):
""" make a paf directly from the rGFA tags. rgfa2paf supports other ranks, but we're only
using rank=0 here to produce an alignment for the reference genome """
work_dir = job.fileStore.getLocalTempDir()
# download the gfa
gfa_path = os.path.join(work_dir, os.path.basename(gfa_path))
job.fileStore.readGlobalFile(gfa_id, gfa_path, mutable=True)
# unzip if needed
if gfa_path.endswith(".gz"):
cactus_call(parameters=['bgzip', '-fd', gfa_path, '--threads', str(job.cores)])
gfa_path = gfa_path[:-3]
# optional paf whose queries we ignore
ignore_paf_path = os.path.join(work_dir, os.path.basename(gfa_path) + ".tofilter.paf")
if ignore_paf_id:
job.fileStore.readGlobalFile(ignore_paf_id, ignore_paf_path)
# make the paf
paf_path = job.fileStore.getLocalTempFile()
cmd = ['rgfa2paf', gfa_path, '-T', 'id={}|'.format(graph_event)]
if ref_event:
cmd += ['-P', 'id={}|'.format(ref_event)]
if ignore_paf_id:
cmd += ['-i', ignore_paf_path]
cactus_call(parameters=cmd, outfile=paf_path)
return job.fileStore.writeGlobalFile(paf_path)
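# For context: rgfa2paf reads the rGFA tags that minigraph attaches to each segment (S) line.
# A hypothetical rank-0 segment of the reference contig chr1 would look like:
#
#   S  s1  ACGT...  SN:Z:chr1  SO:i:12345  SR:i:0
#
# where SN is the stable sequence name, SO the segment's offset on it, and SR the rank
# (0 = the backbone/reference assembly), which is enough to emit a trivial
# reference-to-graph PAF alignment without running any mapper.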
def filter_paf(job, paf_id, config):
""" run basic paf-filtering. these are quick filters that are best to do on-the-fly when reading the paf and
as such, they are called by cactus-graphmap-split and cactus-align, not here """
work_dir = job.fileStore.getLocalTempDir()
paf_path = os.path.join(work_dir, 'mg.paf')
filter_paf_path = os.path.join(work_dir, 'mg.paf.filter')
job.fileStore.readGlobalFile(paf_id, paf_path)
min_block = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minGAFBlockLength", typeFn=int, default=0)
min_mapq = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minMAPQ", typeFn=int, default=0)
min_ident = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "minIdentity", typeFn=float, default=0)
RealtimeLogger.info("Running PAF filter with minBlock={} minMAPQ={} minIdentity={}".format(min_block, min_mapq, min_ident))
with open(paf_path, 'r') as paf_file, open(filter_paf_path, 'w') as filter_paf_file:
for line in paf_file:
toks = line.split('\t')
mapq = int(toks[11])
query_len = int(toks[1])
ident = float(toks[9]) / (float(toks[10]) + 0.00000001)
bl = None
for tok in toks[12:]:
# this is a special tag that was written by gaf2paf in order to preserve the original gaf block length
# we use it to be able to filter by the gaf block even after it's been broken in the paf
if tok.startswith('gl:i:'):
bl = int(tok[5:])
# we can also get the identity of the parent gaf block
if tok.startswith('gi:i:'):
                    ident = min(ident, float(tok[5:]))
if mapq >= min_mapq and (bl is None or query_len <= min_block or bl >= min_block) and ident >= min_ident:
filter_paf_file.write(line)
overlap_ratio = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "PAFOverlapFilterRatio", typeFn=float, default=0)
length_ratio = getOptionalAttrib(findRequiredNode(config.xmlRoot, "graphmap"), "PAFOverlapFilterMinLengthRatio", typeFn=float, default=0)
if overlap_ratio:
overlap_filter_paf_path = filter_paf_path + ".overlap"
cactus_call(parameters=['gaffilter', filter_paf_path, '-p', '-r', str(overlap_ratio), '-m', str(length_ratio),
'-b', str(min_block), '-q', str(min_mapq), '-i', str(min_ident)],
outfile=overlap_filter_paf_path, job_memory=job.memory)
filter_paf_path = overlap_filter_paf_path
return job.fileStore.writeGlobalFile(filter_paf_path)
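# Quick reference for the PAF columns indexed above (0-based after split('\t')): toks[1] = query
# length, toks[9] = number of residue matches, toks[10] = alignment block length, toks[11] = mapping
# quality; everything from toks[12] onward is an optional SAM-style tag. An illustrative, made-up
# record as filter_paf sees it:
#
#   HG002#1#chr1  248387328  1000  51000  +  id=_MINIGRAPH_|s1  60000  0  50000  49000  50000  60  gl:i:250000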
def filter_paf_deletions(job, paf_id, gfa_id, max_deletion, filter_threshold, filter_query_size_threshold):
""" run filter-paf-deletions on a paf to break out giant-snarl-making edges """
work_dir = job.fileStore.getLocalTempDir()
paf_path = os.path.join(work_dir, 'mg.paf')
gfa_path = os.path.join(work_dir, 'mg.gfa')
job.fileStore.readGlobalFile(paf_id, paf_path)
job.fileStore.readGlobalFile(gfa_id, gfa_path)
# make the vg graph
vg_path = gfa_path + '.vg'
trans_path = gfa_path + '.trans'
cactus_call(parameters = ['vg', 'convert', '-r', '0', '-g', gfa_path, '-p', '-T', trans_path],
outfile=vg_path, job_memory=job.memory)
    # call filter-paf-deletions
filter_paf_path = paf_path + ".filter"
filter_log_path = paf_path + ".filter.log"
filter_paf_cmd = ['filter-paf-deletions', vg_path, trans_path, paf_path, '-d', str(max_deletion), '-v', '-p', '-t', str(job.cores)]
if filter_threshold:
filter_paf_cmd += ['-m', str(filter_threshold)]
if filter_query_size_threshold:
filter_paf_cmd += ['-s', str(filter_query_size_threshold)]
filter_stdout, filter_stderr = cactus_call(parameters=filter_paf_cmd, check_output=True, returnStdErr=True, job_memory=job.memory)
with open(filter_log_path, 'w') as filter_log_file:
for line in filter_stderr:
filter_log_file.write(line)
with open(filter_paf_path, 'w') as filter_paf_file:
for line in filter_stdout:
filter_paf_file.write(line)
unfiltered_paf_lines = int(cactus_call(parameters=['wc', '-l', paf_path], check_output=True).strip().split()[0])
filtered_paf_lines = int(cactus_call(parameters=['wc', '-l', filter_paf_path], check_output=True).strip().split()[0])
assert filtered_paf_lines <= unfiltered_paf_lines
was_filtered = filtered_paf_lines < unfiltered_paf_lines
# return the results
return (job.fileStore.writeGlobalFile(filter_paf_path), job.fileStore.writeGlobalFile(filter_log_path), was_filtered)
def add_genome_to_seqfile(seqfile_path, fasta_path, name):
""" hack the auto-generated minigraph assembly back into the seqfile for future use """
seq_file = SeqFile(seqfile_path)
# add the genome to the tree (branching off root)
in_tree = False
max_id = 0
for node in seq_file.tree.preOrderTraversal():
max_id = max(max_id, node)
if seq_file.tree.getName(node) == name:
in_tree = True
break
if not in_tree:
label = max_id + 1
seq_file.tree.nxDg.add_edge(0, label)
seq_file.tree.setName(label, name)
seq_file.tree.setWeight(0, label, seq_file.branchLen)
# add the sequence to the map
seq_file.pathMap[name] = fasta_path
# write the seq file back to disk
with open(seqfile_path, 'w') as seqfile_handle:
seqfile_handle.write(str(seq_file))
if __name__ == "__main__":
main()
|
7505766c613bd4b5b040d5e0aa1886cddcc1fa2d | d64bfb18b93bb9d237ddbaad5a8fa61379685faa | /test/test_grid.py | 4d0792562fbf3ecddc6d96d3871a892a8f3e3419 | ["Apache-2.0"] | permissive | UXARRAY/uxarray | 9f178b9896d9768825011809afbc00264046fba8 | 805e15114ccc6f606bb9b6f6a43bbfd73f07347d | refs/heads/main | 2023-08-23T20:06:41.670476 | 2023-08-18T21:54:05 | 2023-08-18T21:54:05 | 421,447,986 | 118 | 23 | Apache-2.0 | 2023-09-11T18:18:17 | 2021-10-26T14:00:08 | Python | UTF-8 | Python | false | false | 32,857 | py | test_grid.py |
import os
import numpy as np
import numpy.testing as nt
import xarray as xr
from unittest import TestCase
from pathlib import Path
import uxarray as ux
from uxarray.grid.connectivity import _build_edge_node_connectivity, _build_face_edges_connectivity
from uxarray.grid.coordinates import _populate_cartesian_xyz_coord, _populate_lonlat_coord
try:
import constants
except ImportError:
from . import constants
current_path = Path(os.path.dirname(os.path.realpath(__file__)))
gridfile_CSne8 = current_path / "meshfiles" / "scrip" / "outCSne8" / "outCSne8.nc"
gridfile_RLL1deg = current_path / "meshfiles" / "ugrid" / "outRLL1deg" / "outRLL1deg.ug"
gridfile_RLL10deg_CSne4 = current_path / "meshfiles" / "ugrid" / "ov_RLL10deg_CSne4" / "ov_RLL10deg_CSne4.ug"
gridfile_CSne30 = current_path / "meshfiles" / "ugrid" / "outCSne30" / "outCSne30.ug"
gridfile_fesom = current_path / "meshfiles" / "ugrid" / "fesom" / "fesom.mesh.diag.nc"
gridfile_geoflow = current_path / "meshfiles" / "ugrid" / "geoflow-small" / "grid.nc"
dsfile_vortex_CSne30 = current_path / "meshfiles" / "ugrid" / "outCSne30" / "outCSne30_vortex.nc"
dsfile_var2_CSne30 = current_path / "meshfiles" / "ugrid" / "outCSne30" / "outCSne30_var2.nc"
shp_filename = current_path / "meshfiles" / "shp" / "grid_fire.shp"
class TestGrid(TestCase):
grid_CSne30 = ux.open_grid(gridfile_CSne30)
grid_RLL1deg = ux.open_grid(gridfile_RLL1deg)
grid_RLL10deg_CSne4 = ux.open_grid(gridfile_RLL10deg_CSne4)
def test_encode_as(self):
"""Reads a ugrid file and encodes it as `xarray.Dataset` in various
types."""
self.grid_CSne30.encode_as("ugrid")
self.grid_RLL1deg.encode_as("ugrid")
self.grid_RLL10deg_CSne4.encode_as("ugrid")
self.grid_CSne30.encode_as("exodus")
self.grid_RLL1deg.encode_as("exodus")
self.grid_RLL10deg_CSne4.encode_as("exodus")
def test_open_non_mesh2_write_exodus(self):
"""Loads grid files of different formats using uxarray's open_dataset
call."""
grid_geoflow = ux.open_grid(gridfile_CSne30)
exods = grid_geoflow.encode_as("exodus")
# Remove the _FillValue attribute from the variable's attributes
if '_FillValue' in grid_geoflow._ds['Mesh2_face_nodes'].attrs:
del grid_geoflow._ds['Mesh2_face_nodes'].attrs['_FillValue']
exods.to_netcdf("grid_geoflow.exo")
def test_init_verts(self):
"""Create a uxarray grid from multiple face vertices with duplicate
nodes and saves a ugrid file.
Also, test kwargs for grid initialization
        The input cartesian coordinates represent the 8 vertices of a cube
7---------6
/| /|
/ | / |
3---------2 |
| | | |
| 4------|--5
| / | /
|/ |/
0---------1
"""
cart_x = [
0.577340924821405, 0.577340924821405, 0.577340924821405,
0.577340924821405, -0.577345166204668, -0.577345166204668,
-0.577345166204668, -0.577345166204668
]
cart_y = [
0.577343045516932, 0.577343045516932, -0.577343045516932,
-0.577343045516932, 0.577338804118089, 0.577338804118089,
-0.577338804118089, -0.577338804118089
]
cart_z = [
0.577366836872017, -0.577366836872017, 0.577366836872017,
-0.577366836872017, 0.577366836872017, -0.577366836872017,
0.577366836872017, -0.577366836872017
]
        # The order of the vertices is irrelevant; the following indexing is just for forming a face matrix
face_vertices = [
[0, 1, 2, 3], # front face
[1, 5, 6, 2], # right face
[5, 4, 7, 6], # back face
[4, 0, 3, 7], # left face
[3, 2, 6, 7], # top face
[4, 5, 1, 0] # bottom face
]
# Pack the cart_x/y/z into the face matrix using the index from face_vertices
faces_coords = []
for face in face_vertices:
face_coords = []
for vertex_index in face:
x, y, z = cart_x[vertex_index], cart_y[vertex_index], cart_z[
vertex_index]
face_coords.append([x, y, z])
faces_coords.append(face_coords)
        # Now construct the grid using the faces_coords
verts_cart = np.array(faces_coords)
vgrid = ux.open_grid(verts_cart,
vertices=True,
islatlon=False,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 6)
assert (vgrid.nMesh2_node == 8)
vgrid.encode_as("ugrid")
# Test the case when user created a nested one-face grid
faces_verts_one = np.array([
np.array([[150, 10], [160, 20], [150, 30], [135, 30], [125, 20],
[135, 10]])
])
vgrid = ux.open_grid(faces_verts_one,
vertices=True,
islatlon=True,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 1)
assert (vgrid.nMesh2_node == 6)
vgrid.encode_as("ugrid")
# Test the case when user created a one-face grid
faces_verts_single_face = np.array([[150, 10], [160, 20], [150, 30],
[135, 30], [125, 20], [135, 10]])
vgrid = ux.open_grid(faces_verts_single_face,
vertices=True,
islatlon=True,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 1)
assert (vgrid.nMesh2_node == 6)
vgrid.encode_as("ugrid")
def test_init_verts_different_input_datatype(self):
"""Create a uxarray grid from multiple face vertices with different
        data types (ndarray, list, tuple) and saves a ugrid file.
Also, test kwargs for grid initialization
"""
# Test initializing Grid from ndarray
faces_verts_ndarray = np.array([
np.array([[150, 10], [160, 20], [150, 30], [135, 30], [125, 20],
[135, 10]]),
np.array([[125, 20], [135, 30], [125, 60], [110, 60], [100, 30],
[105, 20]]),
np.array([[95, 10], [105, 20], [100, 30], [85, 30], [75, 20],
[85, 10]]),
])
vgrid = ux.open_grid(faces_verts_ndarray,
vertices=True,
islatlon=True,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 3)
assert (vgrid.nMesh2_node == 14)
vgrid.encode_as("ugrid")
# Test initializing Grid from list
faces_verts_list = [[[150, 10], [160, 20], [150, 30], [135, 30],
[125, 20], [135, 10]],
[[125, 20], [135, 30], [125, 60], [110, 60],
[100, 30], [105, 20]],
[[95, 10], [105, 20], [100, 30], [85, 30], [75, 20],
[85, 10]]]
vgrid = ux.open_grid(faces_verts_list,
vertices=True,
islatlon=False,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 3)
assert (vgrid.nMesh2_node == 14)
vgrid.encode_as("ugrid")
# Test initializing Grid from tuples
faces_verts_tuples = [
((150, 10), (160, 20), (150, 30), (135, 30), (125, 20), (135, 10)),
((125, 20), (135, 30), (125, 60), (110, 60), (100, 30), (105, 20)),
((95, 10), (105, 20), (100, 30), (85, 30), (75, 20), (85, 10))
]
vgrid = ux.open_grid(faces_verts_tuples,
vertices=True,
islatlon=False,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 3)
assert (vgrid.nMesh2_node == 14)
vgrid.encode_as("ugrid")
def test_init_verts_fill_values(self):
faces_verts_filled_values = [[[150, 10], [160, 20], [150, 30],
[135, 30], [125, 20], [135, 10]],
[[125, 20], [135, 30], [125, 60],
[110, 60], [100, 30],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]],
[[95, 10], [105, 20], [100, 30], [85, 30],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]]]
vgrid = ux.open_grid(faces_verts_filled_values,
vertices=True,
islatlon=False,
isconcave=False)
assert (vgrid.source_grid == "From vertices")
assert (vgrid.nMesh2_face == 3)
assert (vgrid.nMesh2_node == 12)
def test_grid_properties(self):
"""Tests to see if accessing variables through set properties is equal
to using the dict."""
# Dataset with standard UGRID variable names
# Coordinates
xr.testing.assert_equal(
self.grid_CSne30.Mesh2_node_x, self.grid_CSne30._ds[
self.grid_CSne30.grid_var_names["Mesh2_node_x"]])
xr.testing.assert_equal(
self.grid_CSne30.Mesh2_node_y, self.grid_CSne30._ds[
self.grid_CSne30.grid_var_names["Mesh2_node_y"]])
# Variables
xr.testing.assert_equal(
self.grid_CSne30.Mesh2_face_nodes, self.grid_CSne30._ds[
self.grid_CSne30.grid_var_names["Mesh2_face_nodes"]])
# Dimensions
n_nodes = self.grid_CSne30.Mesh2_node_x.shape[0]
n_faces, n_face_nodes = self.grid_CSne30.Mesh2_face_nodes.shape
self.assertEqual(n_nodes, self.grid_CSne30.nMesh2_node)
self.assertEqual(n_faces, self.grid_CSne30.nMesh2_face)
self.assertEqual(n_face_nodes, self.grid_CSne30.nMaxMesh2_face_nodes)
# xr.testing.assert_equal(
# self.tgrid1.nMesh2_node,
# self.tgrid1._ds[self.tgrid1.grid_var_names["nMesh2_node"]])
# xr.testing.assert_equal(
# self.tgrid1.nMesh2_face,
# self.tgrid1._ds[self.tgrid1.grid_var_names["nMesh2_face"]])
# Dataset with non-standard UGRID variable names
grid_geoflow = ux.open_grid(gridfile_geoflow)
xr.testing.assert_equal(
grid_geoflow.Mesh2_node_x,
grid_geoflow._ds[grid_geoflow.grid_var_names["Mesh2_node_x"]])
xr.testing.assert_equal(
grid_geoflow.Mesh2_node_y,
grid_geoflow._ds[grid_geoflow.grid_var_names["Mesh2_node_y"]])
# Variables
xr.testing.assert_equal(
grid_geoflow.Mesh2_face_nodes,
grid_geoflow._ds[grid_geoflow.grid_var_names["Mesh2_face_nodes"]])
# Dimensions
n_nodes = grid_geoflow.Mesh2_node_x.shape[0]
n_faces, n_face_nodes = grid_geoflow.Mesh2_face_nodes.shape
self.assertEqual(n_nodes, grid_geoflow.nMesh2_node)
self.assertEqual(n_faces, grid_geoflow.nMesh2_face)
self.assertEqual(n_face_nodes, grid_geoflow.nMaxMesh2_face_nodes)
def test_read_shpfile(self):
"""Reads a shape file and write ugrid file."""
with self.assertRaises(ValueError):
grid_shp = ux.open_grid(shp_filename)
def test_read_scrip(self):
"""Reads a scrip file."""
# Test read from scrip and from ugrid for grid class
grid_CSne8 = ux.open_grid(gridfile_CSne8) # tests from scrip
class TestOperators(TestCase):
grid_CSne30_01 = ux.open_grid(gridfile_CSne30)
grid_CSne30_02 = ux.open_grid(gridfile_CSne30)
grid_RLL1deg = ux.open_grid(gridfile_RLL1deg)
def test_eq(self):
"""Test Equals ('==') operator."""
assert self.grid_CSne30_01 == self.grid_CSne30_02
def test_ne(self):
"""Test Not Equals ('!=') operator."""
assert self.grid_CSne30_01 != self.grid_RLL1deg
class TestFaceAreas(TestCase):
grid_CSne30 = ux.open_grid(gridfile_CSne30)
def test_calculate_total_face_area_triangle(self):
"""Create a uxarray grid from vertices and saves an exodus file."""
verts = [[[0.57735027, -5.77350269e-01, -0.57735027],
[0.57735027, 5.77350269e-01, -0.57735027],
[-0.57735027, 5.77350269e-01, -0.57735027]]]
grid_verts = ux.open_grid(verts,
vertices=True,
islatlon=False,
isconcave=False)
#calculate area
area_gaussian = grid_verts.calculate_total_face_area(
quadrature_rule="gaussian", order=5)
nt.assert_almost_equal(area_gaussian, constants.TRI_AREA, decimal=3)
area_triangular = grid_verts.calculate_total_face_area(
quadrature_rule="triangular", order=4)
nt.assert_almost_equal(area_triangular, constants.TRI_AREA, decimal=1)
def test_calculate_total_face_area_file(self):
"""Create a uxarray grid from vertices and saves an exodus file."""
area = self.grid_CSne30.calculate_total_face_area()
nt.assert_almost_equal(area, constants.MESH30_AREA, decimal=3)
def test_calculate_total_face_area_sphere(self):
"""Computes the total face area of an MPAS mesh that lies on a unit
sphere, with an expected total face area of 4pi."""
mpas_grid_path = current_path / 'meshfiles' / "mpas" / "QU" / 'mesh.QU.1920km.151026.nc'
primal_grid = ux.open_grid(mpas_grid_path, use_dual=False)
dual_grid = ux.open_grid(mpas_grid_path, use_dual=True)
primal_face_area = primal_grid.calculate_total_face_area()
dual_face_area = dual_grid.calculate_total_face_area()
nt.assert_almost_equal(primal_face_area,
constants.UNIT_SPHERE_AREA,
decimal=3)
nt.assert_almost_equal(dual_face_area,
constants.UNIT_SPHERE_AREA,
decimal=3)
# TODO: Will depend on the decision for whether to provide integrate function
# from within `Grid` as well as UxDataset
# def test_integrate(self):
# xr_psi = xr.open_dataset(dsfile_vortex_CSne30)
# xr_v2 = xr.open_dataset(dsfile_var2_CSne30)
#
# integral_psi = self.grid_CSne30.integrate(xr_psi)
# integral_var2 = self.grid_CSne30.integrate(xr_v2)
#
# nt.assert_almost_equal(integral_psi, constants.PSI_INTG, decimal=3)
# nt.assert_almost_equal(integral_var2, constants.VAR2_INTG, decimal=3)
def test_compute_face_areas_geoflow_small(self):
"""Checks if the GeoFlow Small can generate a face areas output."""
grid_geoflow = ux.open_grid(gridfile_geoflow)
grid_geoflow.compute_face_areas()
    # TODO: Add this test after fix to transposed face nodes
# def test_compute_face_areas_fesom(self):
# """Checks if the FESOM PI-Grid Output can generate a face areas
# output."""
# grid_fesom = ux.open_grid(gridfile_fesom)
#
# grid_fesom.compute_face_areas()
def test_verts_calc_area(self):
faces_verts_ndarray = np.array([
np.array([[150, 10, 0], [160, 20, 0], [150, 30, 0], [135, 30, 0],
[125, 20, 0], [135, 10, 0]]),
np.array([[125, 20, 0], [135, 30, 0], [125, 60, 0], [110, 60, 0],
[100, 30, 0], [105, 20, 0]]),
np.array([[95, 10, 0], [105, 20, 0], [100, 30, 0], [85, 30, 0],
[75, 20, 0], [85, 10, 0]]),
])
# load our vertices into a UXarray Grid object
verts_grid = ux.open_grid(faces_verts_ndarray,
vertices=True,
islatlon=True,
isconcave=False)
face_verts_areas = verts_grid.face_areas
nt.assert_almost_equal(face_verts_areas.sum(),
constants.FACE_VERTS_AREA,
decimal=3)
class TestPopulateCoordinates(TestCase):
def test_populate_cartesian_xyz_coord(self):
# The following testcases are generated through the matlab cart2sph/sph2cart functions
        # These points correspond to four of the vertices of a cube.
lon_deg = [
45.0001052295749, 45.0001052295749, 360 - 45.0001052295749,
360 - 45.0001052295749
]
lat_deg = [
35.2655522903022, -35.2655522903022, 35.2655522903022,
-35.2655522903022
]
cart_x = [
0.577340924821405, 0.577340924821405, 0.577340924821405,
0.577340924821405
]
cart_y = [
0.577343045516932, 0.577343045516932, -0.577343045516932,
-0.577343045516932
]
cart_z = [
-0.577366836872017, 0.577366836872017, -0.577366836872017,
0.577366836872017
]
verts_degree = np.stack((lon_deg, lat_deg), axis=1)
vgrid = ux.open_grid(verts_degree, islatlon=False)
_populate_cartesian_xyz_coord(vgrid)
for i in range(0, vgrid.nMesh2_node):
nt.assert_almost_equal(vgrid._ds["Mesh2_node_cart_x"].values[i],
cart_x[i],
decimal=12)
nt.assert_almost_equal(vgrid._ds["Mesh2_node_cart_y"].values[i],
cart_y[i],
decimal=12)
nt.assert_almost_equal(vgrid._ds["Mesh2_node_cart_z"].values[i],
cart_z[i],
decimal=12)
def test_populate_lonlat_coord(self):
# The following testcases are generated through the matlab cart2sph/sph2cart functions
        # These points correspond to four of the vertices of a cube.
lon_deg = [
45.0001052295749, 45.0001052295749, 360 - 45.0001052295749,
360 - 45.0001052295749
]
lat_deg = [
35.2655522903022, -35.2655522903022, 35.2655522903022,
-35.2655522903022
]
cart_x = [
0.577340924821405, 0.577340924821405, 0.577340924821405,
0.577340924821405
]
cart_y = [
0.577343045516932, 0.577343045516932, -0.577343045516932,
-0.577343045516932
]
cart_z = [
0.577366836872017, -0.577366836872017, 0.577366836872017,
-0.577366836872017
]
verts_cart = np.stack((cart_x, cart_y, cart_z), axis=1)
vgrid = ux.open_grid(verts_cart, islatlon=False)
_populate_lonlat_coord(vgrid)
# The connectivity in `__from_vert__()` will be formed in a reverse order
lon_deg, lat_deg = zip(*reversed(list(zip(lon_deg, lat_deg))))
for i in range(0, vgrid.nMesh2_node):
nt.assert_almost_equal(vgrid._ds["Mesh2_node_x"].values[i],
lon_deg[i],
decimal=12)
nt.assert_almost_equal(vgrid._ds["Mesh2_node_y"].values[i],
lat_deg[i],
decimal=12)
class TestConnectivity(TestCase):
mpas_filepath = current_path / "meshfiles" / "mpas" / "QU" / "mesh.QU.1920km.151026.nc"
exodus_filepath = current_path / "meshfiles" / "exodus" / "outCSne8" / "outCSne8.g"
ugrid_filepath_01 = current_path / "meshfiles" / "ugrid" / "outCSne30" / "outCSne30.ug"
ugrid_filepath_02 = current_path / "meshfiles" / "ugrid" / "outRLL1deg" / "outRLL1deg.ug"
ugrid_filepath_03 = current_path / "meshfiles" / "ugrid" / "ov_RLL10deg_CSne4" / "ov_RLL10deg_CSne4.ug"
grid_mpas = ux.open_grid(mpas_filepath)
grid_exodus = ux.open_grid(exodus_filepath)
grid_ugrid = ux.open_grid(ugrid_filepath_01)
    # used for constructing vertices
f0_deg = [[120, -20], [130, -10], [120, 0], [105, 0], [95, -10], [105, -20]]
f1_deg = [[120, 0], [120, 10], [115, 0],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]]
f2_deg = [[115, 0], [120, 10], [100, 10], [105, 0],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]]
f3_deg = [[95, -10], [105, 0], [95, 30], [80, 30], [70, 0], [75, -10]]
f4_deg = [[65, -20], [75, -10], [70, 0], [55, 0], [45, -10], [55, -20]]
f5_deg = [[70, 0], [80, 30], [70, 30], [60, 0],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]]
f6_deg = [[60, 0], [70, 30], [40, 30], [45, 0],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE],
[ux.INT_FILL_VALUE, ux.INT_FILL_VALUE]]
# Helper function
def _revert_edges_conn_to_face_nodes_conn(
self, edge_nodes_connectivity: np.ndarray,
face_edges_connectivity: np.ndarray,
original_face_nodes_connectivity: np.ndarray):
"""utilize the edge_nodes_connectivity and face_edges_connectivity to
generate the res_face_nodes_connectivity in the counter-clockwise
order. The counter-clockwise order will be enforced by the passed in
        original_face_nodes_connectivity. We will only use the first two nodes
        in the original_face_nodes_connectivity. The order of these two nodes
will provide a correct counter-clockwise order to build our
res_face_nodes_connectivity. A ValueError will be raised if the first
two nodes in the res_face_nodes_connectivity and the
original_face_nodes_connectivity are not the same elements (The order
doesn't matter here).
Parameters
----------
edge_nodes_connectivity : np.ndarray
The edge_nodes_connectivity array
face_edges_connectivity : np.ndarray
The face_edges_connectivity array
original_face_nodes_connectivity : np.ndarray
The original face_nodes_connectivity array
Returns
-------
res_face_nodes_connectivity : np.ndarray
The face_nodes_connectivity array in the counter-clockwise order
Raises
------
ValueError
if the first two nodes in the res_face_nodes_connectivity are not the same as the first two nodes in the
original_face_nodes_connectivity
"""
# Create a dictionary to store the face indices for each edge
face_nodes_dict = {}
# Loop through each face and edge to build the dictionary
for face_idx, face_edges in enumerate(face_edges_connectivity):
for edge_idx in face_edges:
if edge_idx != ux.INT_FILL_VALUE:
edge = edge_nodes_connectivity[edge_idx]
if face_idx not in face_nodes_dict:
face_nodes_dict[face_idx] = []
face_nodes_dict[face_idx].append(edge[0])
face_nodes_dict[face_idx].append(edge[1])
# Make sure the face_nodes_dict is in the counter-clockwise order and remove duplicate nodes
for face_idx, face_nodes in face_nodes_dict.items():
            # First we need to re-position the first two nodes according to the original face_nodes_connectivity
first_edge_correct = np.array([
original_face_nodes_connectivity[face_idx][0],
original_face_nodes_connectivity[face_idx][1]
])
first_edge = np.array([face_nodes[0], face_nodes[1]])
first_edge_correct_copy = first_edge_correct.copy()
first_edge_copy = first_edge.copy()
            first_edge_correct_copy.sort()
            first_edge_copy.sort()
            self.assertTrue(
                np.array_equal(first_edge_correct_copy, first_edge_copy))
face_nodes[0] = first_edge_correct[0]
face_nodes[1] = first_edge_correct[1]
i = 2
while i < len(face_nodes):
if face_nodes[i] != face_nodes[i - 1]:
# swap the order
old = face_nodes[i]
face_nodes[i] = face_nodes[i - 1]
face_nodes[i + 1] = old
i += 2
after_swapped = face_nodes
after_swapped_remove = [after_swapped[0]]
for i in range(1, len(after_swapped) - 1):
if after_swapped[i] != after_swapped[i - 1]:
after_swapped_remove.append(after_swapped[i])
face_nodes_dict[face_idx] = after_swapped_remove
# Convert the dictionary to a list
res_face_nodes_connectivity = []
for face_idx in range(len(face_edges_connectivity)):
res_face_nodes_connectivity.append(face_nodes_dict[face_idx])
while len(res_face_nodes_connectivity[face_idx]
) < original_face_nodes_connectivity.shape[1]:
res_face_nodes_connectivity[face_idx].append(ux.INT_FILL_VALUE)
return np.array(res_face_nodes_connectivity)
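    # A tiny worked example of what the helper above reconstructs (hypothetical indices): for a
    # single quad face whose original face_nodes row is [0, 1, 2, 3], with
    # edge_nodes_connectivity = [[0, 1], [0, 3], [1, 2], [2, 3]] and
    # face_edges_connectivity = [[0, 2, 3, 1]], flattening the edges in face order gives
    # [0, 1, 1, 2, 2, 3, 0, 3]; anchoring on the original first edge (0, 1), re-orienting the last
    # edge, and deduplicating consecutive nodes yields [0, 1, 2, 3] again, padded with
    # INT_FILL_VALUE up to the original row width.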
def test_build_nNodes_per_face(self):
"""Tests the construction of the ``nNodes_per_face`` variable."""
# test on grid constructed from sample datasets
grids = [self.grid_mpas, self.grid_exodus, self.grid_ugrid]
for grid in grids:
            # highest possible number of nodes per face
max_dimension = grid.nMaxMesh2_face_nodes
# face must be at least a triangle
min_dimension = 3
assert grid.nNodes_per_face.min() >= min_dimension
assert grid.nNodes_per_face.max() <= max_dimension
# test on grid constructed from vertices
verts = [
self.f0_deg, self.f1_deg, self.f2_deg, self.f3_deg, self.f4_deg,
self.f5_deg, self.f6_deg
]
grid_from_verts = ux.open_grid(verts)
# number of non-fill-value nodes per face
expected_nodes_per_face = np.array([6, 3, 4, 6, 6, 4, 4], dtype=int)
nt.assert_equal(grid_from_verts.nNodes_per_face.values,
expected_nodes_per_face)
def test_edge_nodes_euler(self):
"""Verifies that (``nMesh2_edge``) follows euler's formula."""
grid_paths = [
self.exodus_filepath, self.ugrid_filepath_01,
self.ugrid_filepath_02, self.ugrid_filepath_03
]
for grid_path in grid_paths:
grid_ux = ux.open_grid(grid_path)
n_face = grid_ux.nMesh2_face
n_node = grid_ux.nMesh2_node
n_edge = grid_ux.nMesh2_edge
            # Euler's formula (n_face = n_edge - n_node + 2)
assert (n_face == n_edge - n_node + 2)
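    # Sanity check of the identity asserted above on a closed surface mesh: a cube has V = 8 nodes,
    # E = 12 edges and F = 6 faces, and indeed F = E - V + 2 (6 = 12 - 8 + 2), which is just
    # Euler's polyhedron formula V - E + F = 2 rearranged.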
    def test_build_edge_node_connectivity_mpas(self):
"""Tests the construction of (``Mesh2_edge_nodes``) on an MPAS grid
with known edge nodes."""
# grid with known edge node connectivity
mpas_grid_ux = ux.open_grid(self.mpas_filepath)
edge_nodes_expected = mpas_grid_ux._ds['Mesh2_edge_nodes'].values
# arrange edge nodes in the same manner as Grid._build_edge_node_connectivity
edge_nodes_expected.sort(axis=1)
edge_nodes_expected = np.unique(edge_nodes_expected, axis=0)
# construct edge nodes
_build_edge_node_connectivity(mpas_grid_ux, repopulate=True)
edge_nodes_output = mpas_grid_ux._ds['Mesh2_edge_nodes'].values
self.assertTrue(np.array_equal(edge_nodes_expected, edge_nodes_output))
        # Euler's formula (n_face = n_edge - n_node + 2)
        n_face = mpas_grid_ux.nMesh2_face
        n_node = mpas_grid_ux.nMesh2_node
n_edge = edge_nodes_output.shape[0]
assert (n_face == n_edge - n_node + 2)
def test_build_face_edges_connectivity(self):
"""Generates Grid.Mesh2_edge_nodes from Grid.Mesh2_face_nodes."""
ug_filename_list = [
self.ugrid_filepath_01, self.ugrid_filepath_02,
self.ugrid_filepath_03
]
for ug_file_name in ug_filename_list:
tgrid = ux.open_grid(ug_file_name)
mesh2_face_nodes = tgrid._ds["Mesh2_face_nodes"]
_build_face_edges_connectivity(tgrid)
mesh2_face_edges = tgrid._ds.Mesh2_face_edges
mesh2_edge_nodes = tgrid._ds.Mesh2_edge_nodes
# Assert if the mesh2_face_edges sizes are correct.
self.assertEqual(mesh2_face_edges.sizes["nMesh2_face"],
mesh2_face_nodes.sizes["nMesh2_face"])
self.assertEqual(mesh2_face_edges.sizes["nMaxMesh2_face_edges"],
mesh2_face_nodes.sizes["nMaxMesh2_face_nodes"])
# Assert if the mesh2_edge_nodes sizes are correct.
            # Euler's formula for determining the number of edges: n_face = n_edge - n_node + 2
num_edges = mesh2_face_edges.sizes["nMesh2_face"] + tgrid._ds[
"Mesh2_node_x"].sizes["nMesh2_node"] - 2
size = mesh2_edge_nodes.sizes["nMesh2_edge"]
self.assertEqual(mesh2_edge_nodes.sizes["nMesh2_edge"], num_edges)
original_face_nodes_connectivity = tgrid._ds.Mesh2_face_nodes.values
reverted_mesh2_edge_nodes = self._revert_edges_conn_to_face_nodes_conn(
edge_nodes_connectivity=mesh2_edge_nodes.values,
face_edges_connectivity=mesh2_face_edges.values,
original_face_nodes_connectivity=original_face_nodes_connectivity
)
for i in range(len(reverted_mesh2_edge_nodes)):
self.assertTrue(
np.array_equal(reverted_mesh2_edge_nodes[i],
original_face_nodes_connectivity[i]))
def test_build_face_edges_connectivity_mpas(self):
tgrid = ux.open_grid(self.mpas_filepath)
mesh2_face_nodes = tgrid._ds["Mesh2_face_nodes"]
_build_face_edges_connectivity(tgrid)
mesh2_face_edges = tgrid._ds.Mesh2_face_edges
mesh2_edge_nodes = tgrid._ds.Mesh2_edge_nodes
# Assert if the mesh2_face_edges sizes are correct.
self.assertEqual(mesh2_face_edges.sizes["nMesh2_face"],
mesh2_face_nodes.sizes["nMesh2_face"])
self.assertEqual(mesh2_face_edges.sizes["nMaxMesh2_face_edges"],
mesh2_face_nodes.sizes["nMaxMesh2_face_nodes"])
# Assert if the mesh2_edge_nodes sizes are correct.
        # Euler's formula for determining the number of edges: n_face = n_edge - n_node + 2
num_edges = mesh2_face_edges.sizes["nMesh2_face"] + tgrid._ds[
"Mesh2_node_x"].sizes["nMesh2_node"] - 2
size = mesh2_edge_nodes.sizes["nMesh2_edge"]
self.assertEqual(mesh2_edge_nodes.sizes["nMesh2_edge"], num_edges)
def test_build_face_edges_connectivity_fillvalues(self):
verts = [
self.f0_deg, self.f1_deg, self.f2_deg, self.f3_deg, self.f4_deg,
self.f5_deg, self.f6_deg
]
uds = ux.open_grid(verts)
_build_face_edges_connectivity(uds)
n_face = len(uds._ds["Mesh2_face_edges"].values)
n_node = uds.nMesh2_node
n_edge = len(uds._ds["Mesh2_edge_nodes"].values)
self.assertEqual(7, n_face)
self.assertEqual(21, n_node)
self.assertEqual(28, n_edge)
# We will utilize the edge_nodes_connectivity and face_edges_connectivity to generate the
# res_face_nodes_connectivity and compare it with the uds._ds["Mesh2_face_nodes"].values
edge_nodes_connectivity = uds._ds["Mesh2_edge_nodes"].values
face_edges_connectivity = uds._ds["Mesh2_face_edges"].values
face_nodes_connectivity = uds._ds["Mesh2_face_nodes"].values
res_face_nodes_connectivity = self._revert_edges_conn_to_face_nodes_conn(
edge_nodes_connectivity, face_edges_connectivity,
face_nodes_connectivity)
# Compare the res_face_nodes_connectivity with the uds._ds["Mesh2_face_nodes"].values
self.assertTrue(
np.array_equal(res_face_nodes_connectivity,
uds._ds["Mesh2_face_nodes"].values))
|
fede0f06455559526c652d858175802d578f7774 | 12f0bd77926127cdacc2452d6f9cfed91806b2fe | /idaes/core/dmf/experiment.py | a41004ac4b284b2872033e5166c9c7d84c37df04 | ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | IDAES/idaes-pse | e03d2583ae1ba968a7099f9f439fd8c3efa12904 | deacf4c422bc9e50cb347e11a8cbfa0195bd4274 | refs/heads/main | 2023-08-16T19:13:00.355572 | 2023-08-04T04:19:29 | 2023-08-04T04:19:29 | 168,622,088 | 173 | 227 | NOASSERTION | 2023-09-11T16:04:55 | 2019-02-01T01:12:51 | Python | UTF-8 | Python | false | false | 4,328 | py | experiment.py |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
The 'experiment' is a root container for a coherent
set of 'resources'.
"""
# TODO: Missing docstrings
# pylint: disable=missing-function-docstring
# stdlib
from copy import deepcopy
import logging
# local
from idaes.core.dmf import resource, errors
from idaes.core.dmf.resource import Predicates, ResourceTypes
__author__ = "Dan Gunter <dkgunter@lbl.gov>"
_log = logging.getLogger(__name__)
class Experiment(resource.Resource):
"""An experiment is a way of grouping resources in a way that
makes sense to the user.
It is also a useful unit for passing as an argument to functions,
since it has a standard 'slot' for the DMF instance that created it.
"""
def __init__(self, dmf, **kwargs):
"""Constructor. Adds the new experiment to the DMF.
Args:
dmf (DMF): Data Management Framework instance.
kwargs: Keyword arguments passed to parent class.
"""
super(Experiment, self).__init__(value=kwargs)
self.v[self.TYPE_FIELD] = ResourceTypes.experiment
dmf.add(self)
self._dmf = dmf
@property
def dmf(self):
return self._dmf
def add(self, rsrc):
"""Add a resource to an experiment.
This does two things:
1. Establishes an "experiment" type of relationship between the
new resource and the experiment.
2. Adds the resource to the DMF
Args:
rsrc (resource.Resource): The resource to add.
Returns:
resource.Resource: Added (input) resource, for chaining calls.
"""
resource.create_relation(self, Predicates.contains, rsrc)
self._dmf.update(rsrc, upsert=True)
        self._dmf.update(self)
        return rsrc
def copy(self, new_id=True, **kwargs):
"""Get a copy of this experiment. The returned object will
have been added to the DMF.
Args:
new_id (bool): If True, generate a new unique ID for the copy.
kwargs: Values to set in new instance after copying.
Returns:
Experiment: A (mostly deep) copy.
Note that the DMF instance is just a reference to the
same object as in the original, and they will share state.
"""
new_exp = Experiment(self._dmf)
new_exp.v = deepcopy(self.v)
new_exp.v.update(kwargs)
if new_id:
new_exp.set_id()
self._dmf.add(new_exp)
return new_exp
def update(self):
"""Update experiment to current values."""
self._dmf.update(self, sync_relations=True)
def remove(self):
"""Remove this experiment from the associated DMF instance."""
# remove from the DMF
self._dmf.remove(self.id)
# cut the connection to the DMF instance
self._dmf = None
# disable known methods (via monkeypatching!)
for m in "add", "update", "remove", "link", "copy":
self.__dict__[m] = self._removed
def link(self, subj, predicate=Predicates.contains, obj=None):
"""Add and update relation triple in DMF.
Args:
subj (resource.Resource): Subject
predicate (str): Predicate
obj (resource.Resource): Object
Returns:
None
"""
if obj is None:
obj = self
resource.create_relation(subj, predicate, obj)
self._dmf.update(subj)
self._dmf.update(obj)
def _removed(self, *args, **kwargs):
raise errors.BadResourceError("This experiment has been removed")
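# Illustrative usage sketch (not part of this module); the workspace path and keyword metadata
# below are made up, but the call pattern follows the docstrings above:
#
#     from idaes.core.dmf import DMF, resource
#     from idaes.core.dmf.experiment import Experiment
#
#     dmf = DMF("/path/to/workspace")              # open an existing DMF workspace
#     exp = Experiment(dmf, name="my-experiment")  # constructor adds it to the DMF
#     rsrc = resource.Resource(value={"desc": "raw data"})
#     exp.add(rsrc)                                # relate the resource to the experiment and store it
#     exp.update()                                 # push any later edits back to the DMF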
|
bd5cd34d65ba7442bb0a3cd9d67c6f17dc77a63f | 4bdbd0756f56732e44d57c291ffaca4d0c8dfe90 | /fastparquet/benchmarks/columns.py | fe59834d29c23e57c4f30e08704e9303cabc7f51 | ["Apache-2.0"] | permissive | dask/fastparquet | 5e9dbce2d1ce74f55b95374357e1888a6dcff320 | df1219300a96bc1baf9ebad85f4f5676a130c9e8 | refs/heads/main | 2023-09-04T08:51:25.940003 | 2023-08-30T15:00:51 | 2023-08-30T15:00:51 | 45,694,060 | 716 | 210 | Apache-2.0 | 2023-08-30T14:30:53 | 2015-11-06T16:42:17 | Python | UTF-8 | Python | false | false | 7,066 | py | columns.py |
import numpy as np
import os
import pandas as pd
import shutil
import sys
import tempfile
import time
from contextlib import contextmanager
from fastparquet import write, ParquetFile
from fastparquet.util import join_path
@contextmanager
def measure(name, result):
t0 = time.time()
yield
t1 = time.time()
result[name] = round((t1 - t0) * 1000, 3)
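# Example of the pattern used throughout this module: `measure` times the body of the `with` block
# and records the elapsed milliseconds in the supplied dict. The key and workload below are
# illustrative only:
#
#     timings = {}
#     with measure('build 1e6 random ints', timings):
#         np.random.randint(0, 100, 1000000)
#     # timings == {'build 1e6 random ints': <elapsed ms, e.g. 7.3>}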
def time_column():
with tmpdir() as tempdir:
result = {}
        fn = join_path(tempdir, 'temp.parq')
n = 10000000
r = np.random.randint(-1e10, 1e10, n, dtype='int64')
d = pd.DataFrame({'w': pd.Categorical(np.random.choice(
['hi', 'you', 'people'], size=n)),
'x': r.view('timedelta64[ns]'),
'y': r / np.random.randint(1, 1000, size=n),
'z': np.random.randint(0, 127, size=n,
dtype=np.uint8)})
d['b'] = r > 0
for col in d.columns:
df = d[[col]]
write(fn, df)
with measure('%s: write, no nulls' % d.dtypes[col], result):
write(fn, df, has_nulls=False)#, compression="SNAPPY")
pf = ParquetFile(fn)
with measure("file open", result):
ParquetFile(fn)
if col == 'x':
                assert (pf.to_pandas().x.astype('timedelta64[us]') == df.x.astype('timedelta64[us]')).all()
else:
assert (pf.to_pandas() == df).values.all() # warm-up
with measure('%s: read, no nulls' % d.dtypes[col], result):
pf.to_pandas()
with measure('%s: write, no nulls, has_null=True' % d.dtypes[col], result):
write(fn, df, has_nulls=True)#, compression="SNAPPY")
pf = ParquetFile(fn)
if col == 'x':
                assert (pf.to_pandas().x.astype('timedelta64[us]') == df.x.astype('timedelta64[us]')).all()
else:
assert (pf.to_pandas() == df).values.all() # warm-up
with measure('%s: read, no nulls, has_null=True' % d.dtypes[col], result):
pf.to_pandas()
if d.dtypes[col].kind == 'm':
d.loc[n//2, col] = pd.to_datetime('NaT')
elif d.dtypes[col].kind == 'f':
d.loc[n//2, col] = np.nan
elif d.dtypes[col].kind in ['i', 'u']:
continue
else:
d.loc[n//2, col] = None
with measure('%s: write, with null, has_null=True' % d.dtypes[col], result):
write(fn, df, has_nulls=True)#, compression="SNAPPY")
pf = ParquetFile(fn)
if col == 'x':
                assert (pf.to_pandas().x.astype('timedelta64[us]') == df.x.astype('timedelta64[us]')).all()
else:
assert (pf.to_pandas() == df)[~df[col].isna()].values.all() # warm-up
assert (pf.to_pandas().isna() == df.isna()).values.all() # warm-up
with measure('%s: read, with null, has_null=True' % d.dtypes[col], result):
pf.to_pandas()
with measure('%s: write, with null, has_null=False' % d.dtypes[col], result):
write(fn, df, has_nulls=False)#, compression="SNAPPY")
pf = ParquetFile(fn)
if col == 'x':
                assert (pf.to_pandas().x.astype('timedelta64[us]') == df.x.astype('timedelta64[us]')).all()
else:
assert (pf.to_pandas() == df)[~df[col].isna()].values.all() # warm-up
assert (pf.to_pandas().isna() == df.isna()).values.all() # warm-up
with measure('%s: read, with null, has_null=False' % d.dtypes[col], result):
pf.to_pandas()
return result
def time_text():
with tmpdir() as tempdir:
result = {}
fn = join_path(tempdir, 'temp.parq')
n = 1000000
d = pd.DataFrame({
'a': np.random.choice(['hi', 'you', 'people'], size=n),
'b': np.random.choice([b'hi', b'you', b'people'], size=n)})
for col in d.columns:
for fixed in [None, 6]:
df = d[[col]]
if isinstance(df.iloc[0, 0], bytes):
t = "bytes"
else:
t = 'utf8'
write(fn, df)
with measure('%s: write, fixed: %s' % (t, fixed), result):
write(fn, df, has_nulls=False, write_index=False,
fixed_text={col: fixed}, object_encoding=t)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, fixed: %s' % (t, fixed), result):
pf.to_pandas()
return result
def time_find_nulls(N=10000000):
x = np.random.random(N)
df = pd.DataFrame({'x': x})
result = {}
run_find_nulls(df, result)
df.loc[N//2, 'x'] = np.nan
run_find_nulls(df, result)
df.loc[:, 'x'] = np.nan
df.loc[N//2, 'x'] = np.random.random()
run_find_nulls(df, result)
df.loc[N//2, 'x'] = np.nan
run_find_nulls(df, result)
x = np.random.randint(0, 2**30, N)
df = pd.DataFrame({'x': x})
run_find_nulls(df, result)
df = pd.DataFrame({'x': x.view('datetime64[s]')})
run_find_nulls(df, result)
v = df.loc[N//2, 'x']
df.loc[N//2, 'x'] = pd.to_datetime('NaT')
run_find_nulls(df, result)
df.loc[:, 'x'] = pd.to_datetime('NaT')
df.loc[N//2, 'x'] = v
run_find_nulls(df, result)
df.loc[:, 'x'] = pd.to_datetime('NaT')
run_find_nulls(df, result)
    res = pd.Series(result)
    res.index.names = ['type', 'nvalid', 'op']
    return res
def run_find_nulls(df, res):
nvalid = (df.x == df.x).sum()
with measure((df.x.dtype.kind, nvalid, 'notnull'), res):
df.x.notnull()
with measure((df.x.dtype.kind, nvalid, 'notnull,sum'), res):
df.x.notnull().sum()
with measure((df.x.dtype.kind, nvalid, 'notnull,any'), res):
df.x.notnull().any()
with measure((df.x.dtype.kind, nvalid, 'notnull,all'), res):
df.x.notnull().all()
with measure((df.x.dtype.kind, nvalid, 'count'), res):
df.x.count()
# from https://github.com/dask/dask/blob/6cbcf0813af48597a427a1fe6c71cce2a79086b0/dask/utils.py#L78
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
# from https://github.com/dask/dask/blob/6cbcf0813af48597a427a1fe6c71cce2a79086b0/dask/utils.py#L116
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
if __name__ == '__main__':
result = {}
print("sys.version = " + sys.version)
print("sys.platform = " + sys.platform)
for f in [time_column, time_text]:
result.update(f())
for k in sorted(result):
print(k, result[k])
|
2da81a6e69d367454d7cb2e9acfae7a8a36714ab | 8d585fa3b2419d9b993be2f2652e448cfeedc8b2 | /tests/core/test_datadog.py | afb6b9a99da136a02f7bd50d7d88707cf0f17e77 | ["BSD-3-Clause", "BSD-2-Clause"] | permissive | DataDog/dd-agent | bd4ef0edb234293b51d30894a529ce94b37060f8 | 16fa4ec9ae11ca0adfffbd260c5b4899dc73509f | refs/heads/master | 2023-08-16T09:52:21.816487 | 2023-07-11T15:37:34 | 2023-07-11T15:37:34 | 1,210,071 | 1,227 | 991 | NOASSERTION | 2023-06-28T12:20:19 | 2010-12-31T03:02:47 | Python | UTF-8 | Python | false | false | 22,293 | py | test_datadog.py |
# stdlib
import calendar
from datetime import datetime
import logging
import os
import re
from tempfile import gettempdir, NamedTemporaryFile
import time
import unittest
# 3p
from nose.plugins.attrib import attr
# project
from checks.datadog import Dogstreams, EventDefaults
log = logging.getLogger('datadog.test')
def parse_ancient_function_plugin(logger, line):
"""Ancient stateless parser"""
res = line.split()
    res[3] = {'metric_type': 'gauge'}
    return tuple(res)
def parse_function_plugin(logger, line, state):
"""Simple stateful parser"""
try:
acc = state["test_acc"] + 1
except KeyError:
acc = 1
state["test_acc"] = acc
res = line.split()
res[2] = acc
res[3] = {'metric_type': 'counter'}
return tuple(res)
class ParseClassPlugin(object):
"""Class-based stateful parser"""
def __init__(self, logger=None, user_args=(), **kwargs):
self.logger = logger
self.args = '.'.join(user_args)
self.acc = 0
self.logger.info('Completed initialization')
def parse_line(self, line):
self.logger.info('Parsing line %r; counter is %r', line, self.acc)
self.acc += 1
res = line.split()
res[0] = self.args + ':' + res[0]
res[2] = self.acc
res[3] = {'metric_type': 'counter'}
return tuple(res)
log_event_pattern = re.compile("".join([
r"(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ", # iso timestamp
r"\[(?P<alert_type>(ERROR)|(RECOVERY))\] - ", # alert type
r"(?P<msg_title>(?P<host>[^ ]*).*)"
]))
alert_types = {
"ERROR": "error",
"RECOVERY": "success"
}
def parse_events(logger, line):
""" Expecting lines like this:
2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)
"""
match = log_event_pattern.match(line)
if match:
groups = match.groupdict()
groups.update({
'alert_type': alert_types.get(groups['alert_type'], ''),
'timestamp': calendar.timegm(datetime.strptime(groups['timestamp'], '%Y-%m-%d %H:%M:%S').timetuple()),
'msg_text': line
})
return groups
else:
return None
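# For the first sample line in the docstring above, parse_events returns a dict along these lines
# (the timestamp is the UTC epoch seconds for 2012-05-14 12:46:01, matching the expected test
# output further below):
#
#     {'timestamp': 1336999561,
#      'alert_type': 'error',
#      'host': 'host0',
#      'msg_title': 'host0 is down (broke its collarbone)',
#      'msg_text': '2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)'}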
def repr_event_parser(logger, line):
return eval(line)
class TailTestCase(unittest.TestCase):
def setUp(self):
self.log_file = NamedTemporaryFile()
self.logger = logging.getLogger('test.dogstream')
def _write_log(self, log_data):
for data in log_data:
print >> self.log_file, data
self.log_file.flush()
def tearDown(self):
self.log_file.close()
# Don't run these tests on Windows because the temp file scheme used in them
# is hard to support on Windows
@attr('unix')
class TestDogstream(TailTestCase):
gauge = {'metric_type': 'gauge'}
counter = {'metric_type': 'counter'}
def setUp(self):
TailTestCase.setUp(self)
self.config = {
'dogstreams': self.log_file.name,
'check_freq': 5,
}
log.info("Test config: %s" % self.config)
self.dogstream = Dogstreams.init(self.logger, self.config)
self.maxDiff = None
def test_dogstream_gauge(self):
log_data = [
# bucket 0
('test.metric.a', '1000000000', '10', 'metric_type=gauge'),
('test.metric.a', '1000000001', '20', 'metric_type=gauge'),
('test.metric.a', '1000000002', '3', 'metric_type=gauge'),
('test.metric.a', '1000000003', '4', 'metric_type=gauge'),
('test.metric.a', '1000000004', '5', 'metric_type=gauge'),
# bucket 1
('test.metric.a', '1000000005', '12', 'metric_type=gauge'),
('test.metric.a', '1000000006', '7', 'metric_type=gauge'),
('test.metric.a', '1000000007', '8', 'metric_type=gauge'),
]
expected_output = {
"dogstream": [
('test.metric.a', 1000000000, 5.0, self.gauge),
('test.metric.a', 1000000005, 8.0, self.gauge),
]
}
self._write_log((' '.join(data) for data in log_data))
actual_output = self.dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
for metric, timestamp, val, attrib in expected_output['dogstream']:
assert isinstance(val, float)
def test_dogstream_counter(self):
log_data = [
# bucket 0
('test.metric.a', '1000000000', '10', 'metric_type=counter'),
('test.metric.a', '1000000001', '20', 'metric_type=counter'),
('test.metric.a', '1000000002', '3', 'metric_type=counter'),
('test.metric.a', '1000000003', '4', 'metric_type=counter'),
('test.metric.a', '1000000004', '5', 'metric_type=counter'),
# bucket 1
('test.metric.a', '1000000005', '12', 'metric_type=counter'),
('test.metric.a', '1000000006', '7', 'metric_type=counter'),
('test.metric.a', '1000000007', '8', 'metric_type=counter'),
]
expected_output = {
"dogstream": [
('test.metric.a', 1000000000, 42, self.counter),
('test.metric.a', 1000000005, 27, self.counter),
]
}
self._write_log((' '.join(data) for data in log_data))
actual_output = self.dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
for metric, timestamp, val, attrib in expected_output['dogstream']:
assert isinstance(val, (int, long))
def test_dogstream_bad_input(self):
log_data = [
('test.metric.e1000000000 1metric_type=gauge'),
('1000000001 1 metric_type=gauge tag=staging'),
('test_metric.e 1 1000000002 metric_type=gauge'),
('test_metric.e 1000000002 10 metric_type=gauge'),
]
expected_output = {"dogstream":
[('test_metric.e', 1000000000, 10, self.gauge)]
}
self._write_log(log_data)
actual_output = self.dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_dogstream_io_error(self):
log_data = [
('test_metric.e 1000000000 10 metric_type=gauge'),
]
expected_output = {"dogstream":
[('test_metric.e', 1000000000, 10, self.gauge)]
}
self._write_log(log_data)
# Simulate missing file by making it unreadable
os.chmod(self.log_file.name, 0000)
actual_output = self.dogstream.check(self.config, move_end=False)
self.assertEquals({}, actual_output)
os.chmod(self.log_file.name, 0600)
actual_output = self.dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_dogstream_log_path_globbing(self):
"""Make sure that globbed dogstream logfile matching works."""
# Create a tmpfile to serve as a prefix for the other temporary
# files we'll be globbing.
first_tmpfile = NamedTemporaryFile()
tmp_fprefix = os.path.basename(first_tmpfile.name)
all_tmp_filenames = set([first_tmpfile.name])
# We stick the file objects in here to avoid garbage collection (and
# tmpfile deletion). Not sure why this was happening, but it's working
# with this hack in.
avoid_gc = []
for i in range(3):
new_tmpfile = NamedTemporaryFile(prefix=tmp_fprefix)
all_tmp_filenames.add(new_tmpfile.name)
avoid_gc.append(new_tmpfile)
dogstream_glob = os.path.join(gettempdir(), tmp_fprefix + '*')
paths = Dogstreams._get_dogstream_log_paths(dogstream_glob)
self.assertEqual(set(paths), all_tmp_filenames)
def test_dogstream_function_plugin(self):
"""Ensure that non-class-based stateful plugins work"""
log_data = [
'test.metric.accumulator 1000000000 1 metric_type=counter',
'test.metric.accumulator 1100000000 1 metric_type=counter'
]
expected_output = {
"dogstream": [
('test.metric.accumulator', 1000000000, 1, self.counter),
('test.metric.accumulator', 1100000000, 2, self.counter)]
}
self._write_log(log_data)
statedog = Dogstreams.init(self.logger, {'dogstreams': '{0}:{1}:parse_function_plugin'.format(self.log_file.name, __name__)})
actual_output = statedog.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_dogstream_new_plugin(self):
"""Ensure that class-based stateful plugins work"""
log_data = [
'test.metric.accumulator 1000000000 1 metric_type=counter',
'test.metric.accumulator 1100000000 1 metric_type=counter'
]
expected_output = {
"dogstream": [
('foo.bar:test.metric.accumulator', 1000000000, 1, self.counter),
('foo.bar:test.metric.accumulator', 1100000000, 2, self.counter)]
}
self._write_log(log_data)
statedog = Dogstreams.init(self.logger, {'dogstreams': '{0}:{1}:ParseClassPlugin:foo:bar'.format(self.log_file.name, __name__)})
actual_output = statedog.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_dogstream_events(self):
log_data = [
'2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)',
'2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)',
'2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)',
'2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)',
]
expected_output = {
"dogstreamEvents": [
{
"timestamp": 1336999561,
"alert_type": "error",
"host": "host0",
"msg_title": "host0 is down (broke its collarbone)",
"msg_text": "2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1336999687,
"alert_type": "error",
"host": "host1",
"msg_title": "host1 is down (got a bloody nose)",
"msg_text": "2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1336999923,
"alert_type": "success",
"host": "host0",
"msg_title": "host0 is up (collarbone healed)",
"msg_text": "2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1337000349,
"alert_type": "success",
"host": "host1",
"msg_title": "host1 is up (nose stopped bleeding)",
"msg_text": "2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
]
}
self._write_log(log_data)
dogstream = Dogstreams.init(self.logger, {'dogstreams': '{0}:{1}:parse_events'.format(self.log_file.name, __name__)})
actual_output = dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_dogstream_events_validation(self):
log_data = [
{"msg_title": "title", "timestamp": 1336999561},
{"msg_text": "body", "timestamp": 1336999561},
{"none of the above": "should get filtered out", "timestamp": 1336999561},
]
expected_output = {
"dogstreamEvents": [
{
"timestamp": 1336999561,
"msg_title": "title",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
{
"timestamp": 1336999561,
"msg_text": "body",
"event_type": EventDefaults.EVENT_TYPE,
"aggregation_key": EventDefaults.EVENT_OBJECT,
"event_object": EventDefaults.EVENT_OBJECT,
},
]
}
self._write_log([repr(d) for d in log_data])
dogstream = Dogstreams.init(self.logger, {'dogstreams': '{0}:{1}:repr_event_parser'.format(self.log_file.name, __name__)})
actual_output = dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_cassandra_parser(self):
from dogstream import cassandra, common
log_data = """ INFO [CompactionExecutor:1594] 2012-05-12 21:05:12,924 Saved test_data-Encodings-KeyCache (86400 items) in 85 ms
INFO [CompactionExecutor:1595] 2012-05-12 21:05:15,144 Saved test_data-Metrics-KeyCache (86400 items) in 96 ms
INFO [CompactionExecutor:1596] 2012-05-12 21:10:48,058 Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]
INFO [CompactionExecutor:1596] 2012-05-12 21:10:54,851 Compacted to [/var/cassandra/a-hc-65-Data.db,]. 102,079,134 to 101,546,397
INFO [CompactionExecutor:1598] 2012-05-12 22:05:04,313 Saved test_data-ResourcesMetadata-KeyCache (1 items) in 10 ms
INFO [CompactionExecutor:1599] 2012-05-12 22:05:14,813 Saved test_data-Encodings-KeyCache (86400 items) in 83 ms
INFO [CompactionExecutor:1630] 2012-05-13 13:05:44,963 Saved test_data-Metrics-KeyCache (86400 items) in 77 ms
INFO [CompactionExecutor:1631] 2012-05-13 13:15:01,923 Nothing to compact in data_log. Use forceUserDefinedCompaction if you wish to force compaction of single sstables (e.g. for tombstone collection)
INFO [CompactionExecutor:1632] 2012-05-13 13:15:01,927 Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]
INFO [CompactionExecutor:1632] 2012-05-13 13:27:17,685 Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally
INFO [CompactionExecutor:34] 2012-05-14 18:00:41,281 Saved test_data-Encodings-KeyCache (86400 items) in 78 ms
INFO 13:27:17,685 Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally
"""
alert_type = cassandra.ALERT_TYPES["INFO"]
event_type = cassandra.EVENT_TYPE
event_object = EventDefaults.EVENT_OBJECT
expected_output = {
"dogstreamEvents": [
{
"timestamp": cassandra.parse_date("2012-05-12 21:10:48,058"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]"[0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-12 21:10:54,851"),
"msg_title": "Compacted to [/var/cassandra/a-hc-65-Data.db,]. 102,079,134 to 101,546,397",
"alert_type": alert_type,
"auto_priority": 0,
"event_type": event_type,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-13 13:15:01,927"),
"msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]"[0:common.MAX_TITLE_LEN],
"msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date("2012-05-13 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
}, {
"timestamp": cassandra.parse_date(datetime.utcnow().strftime("%Y-%m-%d") + " 13:27:17,685"),
"msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
"alert_type": alert_type,
"event_type": event_type,
"auto_priority": 0,
"aggregation_key": event_object,
"event_object": event_object,
},
]
}
self._write_log(log_data.split("\n"))
dogstream = Dogstreams.init(self.logger, {'dogstreams': '%s:dogstream.cassandra:parse_cassandra' % self.log_file.name})
actual_output = dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
def test_supervisord_parser(self):
from dogstream import supervisord_log
log_data = """2012-07-16 22:30:48,335 INFO spawned: 'monitor' with pid 20216
2012-07-14 03:02:47,325 INFO success: foo_bar entered RUNNING state, process has stayed up for > than 2 seconds (startsecs)
2012-07-17 02:53:04,600 CRIT Server 'inet_http_server' running without any HTTP authentication checking
2012-07-14 04:54:34,193 WARN received SIGTERM indicating exit request
"""
event_type = supervisord_log.EVENT_TYPE
expected_output = {
"dogstreamEvents": [
{
"alert_type": "info", "event_type": event_type,
"aggregation_key": "monitor",
"event_object": "monitor",
"msg_title": "spawned: 'monitor' with pid 20216",
"timestamp": int(time.mktime(datetime(2012, 7, 16, 22, 30, 48).timetuple())),
}, {
"alert_type": "success", "event_type": event_type,
"aggregation_key": "foo_bar",
"event_object": "foo_bar",
"msg_title": "success: foo_bar entered RUNNING state, "
"process has stayed up for > than 2 seconds (startsecs)",
"timestamp": int(time.mktime(datetime(2012, 7, 14, 3, 2, 47).timetuple())),
}, {
"alert_type": "error", "event_type": event_type,
"aggregation_key": "inet_http_server",
"event_object": "inet_http_server",
"msg_title": "Server 'inet_http_server' running without any HTTP authentication checking",
"timestamp": int(time.mktime(datetime(2012, 7, 17, 2, 53, 4).timetuple())),
}, {
"alert_type": "warning", "event_type": event_type,
"aggregation_key": "SIGTERM",
"event_object": "SIGTERM",
"msg_title": "received SIGTERM indicating exit request",
"timestamp": int(time.mktime(datetime(2012, 7, 14, 4, 54, 34).timetuple())),
},
]
}
self._write_log(log_data.split("\n"))
dogstream = Dogstreams.init(self.logger, {'dogstreams': '%s:dogstream.supervisord_log:parse_supervisord' % self.log_file.name})
actual_output = dogstream.check(self.config, move_end=False)
self.assertEquals(expected_output, actual_output)
|
8112fa618345a2771b995ba9ea2eb43ea606ac7a
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/tools/Redfish-Interface-Emulator/api_emulator/redfish/templates/session.py
|
2fbeff4e4264dc95d9974788f447a6394d45b5a5
|
[
"BSD-3-Clause"
] |
permissive
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
session.py
|
# Copyright Notice:
# Copyright 2017-2019 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Interface-Emulator/blob/master/LICENSE.md
# Session Template File
import copy
import strgen
_TEMPLATE = \
{
"@Redfish.Copyright":"Copyright 2014-2021 DMTF. All rights reserved.",
"@odata.id": "{rb}SessionService/Sessions/{id}",
"@odata.type": "#Session.v1_3_0.Session",
"Id": "{id}",
"Name": "Session {id}",
"Description": "Manager User Session",
"UserName": "Administrator",
}
def get_Session_instance(wildcards):
"""
Instantiate and format the template
Arguments:
        wildcards - A dictionary of wildcard strings and their replacement values
"""
c = copy.deepcopy(_TEMPLATE)
    # _TEMPLATE above defines no '@odata.context' key; guard the lookup to avoid
    # a KeyError when formatting the copy.
    if '@odata.context' in c:
        c['@odata.context'] = c['@odata.context'].format(**wildcards)
c['@odata.id'] = c['@odata.id'].format(**wildcards)
c['Id'] = c['Id'].format(**wildcards)
c['Description'] = c['Description'].format(**wildcards)
c['Name'] = c['Name'].format(**wildcards)
c['UserName'] = c['UserName'].format(**wildcards)
return c
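# Hedged usage sketch (illustrative only; the wildcard values below are
# assumptions, chosen to match the '{rb}' and '{id}' placeholders used above):
if __name__ == '__main__':
    session = get_Session_instance({'rb': '/redfish/v1/', 'id': '1'})
    print(session['@odata.id'])   # /redfish/v1/SessionService/Sessions/1
    print(session['Name'])        # Session 1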
|
e5f6f55ac74e1d3d94781908bbf87f2e74fd10eb
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/ftp/operators/ftp.py
|
4489839d8f493b770575699592ee4f6cdab522a7
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 8,171
|
py
|
ftp.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains FTP operator."""
from __future__ import annotations
import os
import socket
from ftplib import FTP_PORT
from functools import cached_property
from pathlib import Path
from typing import Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.ftp.hooks.ftp import FTPHook, FTPSHook
class FTPOperation:
"""Operation that can be used with FTP."""
PUT = "put"
GET = "get"
class FTPFileTransmitOperator(BaseOperator):
"""
    FTPFileTransmitOperator for transferring files from a remote host to local or vice versa.
    This operator uses an FTPHook to open an FTP transport channel that serves as the basis for file transfer.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FTPFileTransmitOperator`
:param ftp_conn_id: :ref:`ftp connection id<howto/connection:ftp>`
from airflow Connections.
:param local_filepath: local file path to get or put. (templated)
:param remote_filepath: remote file path to get or put. (templated)
:param operation: specify operation 'get' or 'put', defaults to put
:param create_intermediate_dirs: create missing intermediate directories when
copying from remote to local and vice-versa. Default is False.
Example: The following task would copy ``file.txt`` to the remote host
at ``/tmp/tmp1/tmp2/`` while creating ``tmp``,``tmp1`` and ``tmp2`` if they
        don't exist. If the ``create_intermediate_dirs`` parameter is not passed, the task
        would fail because the directory does not exist. ::
put_file = FTPFileTransmitOperator(
task_id="test_ftp",
ftp_conn_id="ftp_default",
local_filepath="/tmp/file.txt",
remote_filepath="/tmp/tmp1/tmp2/file.txt",
operation="put",
create_intermediate_dirs=True,
dag=dag
)
"""
template_fields: Sequence[str] = ("local_filepath", "remote_filepath")
def __init__(
self,
*,
ftp_conn_id: str = "ftp_default",
local_filepath: str | list[str],
remote_filepath: str | list[str],
operation: str = FTPOperation.PUT,
create_intermediate_dirs: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.ftp_conn_id = ftp_conn_id
self.operation = operation
self.create_intermediate_dirs = create_intermediate_dirs
self.local_filepath = local_filepath
self.remote_filepath = remote_filepath
@cached_property
def hook(self) -> FTPHook:
"""Create and return an FTPHook."""
return FTPHook(ftp_conn_id=self.ftp_conn_id)
def execute(self, context: Any) -> str | list[str] | None:
file_msg = None
if isinstance(self.local_filepath, str):
local_filepath_array = [self.local_filepath]
else:
local_filepath_array = self.local_filepath
if isinstance(self.remote_filepath, str):
remote_filepath_array = [self.remote_filepath]
else:
remote_filepath_array = self.remote_filepath
if len(local_filepath_array) != len(remote_filepath_array):
raise ValueError(
f"{len(local_filepath_array)} paths in local_filepath "
f"!= {len(remote_filepath_array)} paths in remote_filepath"
)
if self.operation.lower() not in [FTPOperation.GET, FTPOperation.PUT]:
raise TypeError(
f"Unsupported operation value {self.operation}, "
f"expected {FTPOperation.GET} or {FTPOperation.PUT}."
)
for _local_filepath, _remote_filepath in zip(local_filepath_array, remote_filepath_array):
if self.operation.lower() == FTPOperation.GET:
local_folder = os.path.dirname(_local_filepath)
if self.create_intermediate_dirs:
Path(local_folder).mkdir(parents=True, exist_ok=True)
file_msg = f"from {_remote_filepath} to {_local_filepath}"
self.log.info("Starting to transfer %s", file_msg)
self.hook.retrieve_file(_remote_filepath, _local_filepath)
else:
remote_folder = os.path.dirname(_remote_filepath)
if self.create_intermediate_dirs:
self.hook.create_directory(remote_folder)
file_msg = f"from {_local_filepath} to {_remote_filepath}"
self.log.info("Starting to transfer file %s", file_msg)
self.hook.store_file(_remote_filepath, _local_filepath)
return self.local_filepath
def get_openlineage_facets_on_start(self):
"""
Returns OpenLineage datasets.
Dataset will have the following structure:
input: file://hostname/path
            output: file://<conn.host>:<conn.port>/path.
"""
from openlineage.client.run import Dataset
from airflow.providers.openlineage.extractors import OperatorLineage
scheme = "file"
local_host = socket.gethostname()
try:
local_host = socket.gethostbyname(local_host)
except Exception as e:
            self.log.warning(
                f"Failed to resolve local hostname; using the unresolved hostname from socket.gethostname(). {e}",  # noqa: E501
exc_info=True,
)
conn = self.hook.get_conn()
remote_host = conn.host
remote_port = conn.port
if isinstance(self.local_filepath, str):
local_filepath = [self.local_filepath]
else:
local_filepath = self.local_filepath
if isinstance(self.remote_filepath, str):
remote_filepath = [self.remote_filepath]
else:
remote_filepath = self.remote_filepath
local_datasets = [
Dataset(namespace=self._get_namespace(scheme, local_host, None, path), name=path)
for path in local_filepath
]
remote_datasets = [
Dataset(namespace=self._get_namespace(scheme, remote_host, remote_port, path), name=path)
for path in remote_filepath
]
if self.operation.lower() == FTPOperation.GET:
inputs = remote_datasets
outputs = local_datasets
else:
inputs = local_datasets
outputs = remote_datasets
return OperatorLineage(
inputs=inputs,
outputs=outputs,
)
def _get_namespace(self, scheme, host, port, path) -> str:
port = port or FTP_PORT
authority = f"{host}:{port}"
return f"{scheme}://{authority}"
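# Illustrative note (added; not part of the Airflow source): the OpenLineage
# namespace built by _get_namespace() is "<scheme>://<host>:<port>", falling
# back to ftplib.FTP_PORT (21) when the connection defines no port. For example,
# with a hypothetical operator instance `op`:
#
#   op._get_namespace("file", "ftp.example.com", None, "/tmp/file.txt")
#   # -> "file://ftp.example.com:21"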
class FTPSFileTransmitOperator(FTPFileTransmitOperator):
"""
    FTPSFileTransmitOperator for transferring files from a remote host to local or vice versa.
    This operator uses an FTPSHook to open an FTPS transport channel that serves as the basis for file transfer.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FTPSFileTransmitOperator`
"""
@cached_property
def hook(self) -> FTPSHook:
"""Create and return an FTPSHook."""
return FTPSHook(ftp_conn_id=self.ftp_conn_id)
|
4178005be32014aa1dc23037ddb528c7f5af6874
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/common/checks_infra/solvers/attribute_solvers/number_of_words_greater_than_or_equal_attribute_solver.py
|
ef4d2d3ea19464af1e9711e018a441283d16f921
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 904
|
py
|
number_of_words_greater_than_or_equal_attribute_solver.py
|
from typing import Optional, Any, Dict
from checkov.common.checks_infra.solvers.attribute_solvers.base_number_of_words_attribute_solver import \
BaseNumberOfWordsAttributeSolver
from checkov.common.graph.checks_infra.enums import Operators
class NumberOfWordsGreaterThanOrEqualAttributeSolver(BaseNumberOfWordsAttributeSolver):
operator = Operators.NUMBER_OF_WORDS_GREATER_THAN_OR_EQUAL # noqa: CCE003 # a static attribute
def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
attr = vertex.get(attribute) # type:ignore[arg-type] # due to attribute can be None
if not self._validate_vertex_value(attr):
return False
num_of_words = self._get_number_of_words(attr)
value_numeric = self._numerize_value()
if value_numeric is None:
return False
return num_of_words >= value_numeric
|
514fcc51dad8e155b96c6ce633ec442da5f28a80
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/string_strip.py
|
971a4aae539b17d8ed4ed9fa2c677f5f574ea97e
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 988
|
py
|
string_strip.py
|
print("".strip())
print(" \t\n\r\v\f".strip())
print(" T E S T".strip())
print("abcabc".strip("ce"))
print("aaa".strip("b"))
print("abc efg ".strip("g a"))
print(' spacious '.lstrip())
print('www.example.com'.lstrip('cmowz.'))
print(' spacious '.rstrip())
print('mississippi'.rstrip('ipz'))
print(b'mississippi'.rstrip(b'ipz'))
try:
print(b'mississippi'.rstrip('ipz'))
except TypeError:
print("TypeError")
try:
print('mississippi'.rstrip(b'ipz'))
except TypeError:
print("TypeError")
# single-char subj string used to give a problem
print("a".strip())
print("a".lstrip())
print("a".rstrip())
print(" a".strip())
print(" a".lstrip())
print(" a".rstrip())
print("a ".strip())
print("a ".lstrip())
print("a ".rstrip())
# \0 used to give a problem
print("\0abc\0".strip())
print("\0abc\0".lstrip())
print("\0abc\0".rstrip())
print("\0abc\0".strip("\0"))
# Test that stripping unstrippable string returns original object
s = "abc"
print(id(s.strip()) == id(s))
|
4834075104b7848319f9c93780e12a29c3c99e4b
|
f251667b6c1e708c3ff00b52d090f53763a0a90d
|
/setup.py
|
af08081bb9ad441eb2b338d01144e92634bcc511
|
[
"CC-BY-SA-4.0",
"MIT"
] |
permissive
|
karask/python-bitcoin-utils
|
e0d89f0b28ef29a6b19db77e02eb1170cdf1a47c
|
0c419fcc51ac7e245d412da48ad3690735ff88fa
|
refs/heads/master
| 2023-08-17T19:30:31.826879
| 2023-08-03T11:06:55
| 2023-08-03T11:06:55
| 149,975,129
| 186
| 68
|
MIT
| 2022-04-12T07:43:45
| 2018-09-23T10:47:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
setup.py
|
from setuptools import setup
from bitcoinutils import __version__
#with open('requirements.txt') as f:
# requirements = f.read().splitlines()
#install_reqs = parse_requirements('requirements.txt', session=False)
#requirements = [str(ir.req) for ir in install_reqs]
with open('README.rst') as readme:
long_description = readme.read()
setup(name='bitcoin-utils',
version=__version__,
description='Bitcoin utility functions',
long_description=long_description,
author='Konstantinos Karasavvas',
author_email='kkarasavvas@gmail.com',
url='https://github.com/karask/python-bitcoin-utils',
license='MIT',
keywords='bitcoin library utilities tools',
install_requires=[
'base58check>=1.0.2,<2.0',
'ecdsa==0.17.0',
'sympy>=1.2,<2.0',
'python-bitcoinrpc>=1.0,<2.0',
'hdwallet==2.2.1'
],
packages=['bitcoinutils'],
#package_data={
# 'bitcoinutils': ['requirements.txt']
#},
#include_package_data=True,
zip_safe=False
)
|
caaf57c16c05b91d11588b8b361bf0d78d3ca437
|
2e038c642350e9a29bcd845b2f922f9c017fa7d8
|
/src/westpa/oldtools/aframe/mcbs.py
|
f42a855ab043e4a6a3dcd77e0af2b2d9b23cf7e3
|
[
"MIT"
] |
permissive
|
westpa/westpa
|
e8e0952bdbe9a95f06eca07762e1e9372156dd9a
|
85ed1c54159d639d2fcb9e23c45f93743bfed2e0
|
refs/heads/westpa2
| 2023-09-01T11:21:44.944424
| 2023-08-11T21:56:40
| 2023-08-11T21:56:40
| 24,576,160
| 181
| 66
|
MIT
| 2023-09-14T16:46:54
| 2014-09-29T02:04:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,928
|
py
|
mcbs.py
|
'''
Tools for Monte Carlo bootstrap error analysis
'''
import logging
import math
import numpy as np
import westpa
from westpa.oldtools.aframe import AnalysisMixin
log = logging.getLogger(__name__)
class MCBSMixin(AnalysisMixin):
def __init__(self):
super().__init__()
self.mcbs_alpha = None
self.mcbs_nsets = None
self.mcbs_display_confidence = None
def add_args(self, parser, upcall=True):
if upcall:
try:
upfunc = super().add_args
except AttributeError:
pass
else:
upfunc(parser)
group = parser.add_argument_group('Monte Carlo bootstrap options')
group.add_argument(
'--confidence',
dest='mcbs_confidence',
type=float,
default=0.95,
metavar='P',
help='''Construct a confidence interval of width P (default: 0.95=95%%).''',
)
group.add_argument(
'--bssize',
dest='mcbs_nsets',
type=int,
metavar='NSETS',
help='''Use NSETS synthetic data sets to calculate confidence intervals (default:
calculated based on confidence level, but not less than 1000).''',
)
def process_args(self, args, upcall=True):
self.mcbs_alpha = 1 - args.mcbs_confidence
        self.mcbs_nsets = args.mcbs_nsets if args.mcbs_nsets else max(1000, calc_mcbs_nsets(self.mcbs_alpha))  # not fewer than 1000 sets (see --bssize help)
self.mcbs_display_confidence = '{:.{cp}f}'.format(
100 * args.mcbs_confidence, cp=-int(math.floor(math.log10(self.mcbs_alpha))) - 2
)
westpa.rc.pstatus(
'Using bootstrap of {:d} sets to calculate {:s}% confidence interval (alpha={:g}).'.format(
self.mcbs_nsets, self.mcbs_display_confidence, self.mcbs_alpha
)
)
if upcall:
try:
upfunc = super().process_args
except AttributeError:
pass
else:
upfunc(args)
def calc_mcbs_nsets(self, alpha=None):
alpha = alpha or self.mcbs_alpha
return calc_mcbs_nsets(alpha)
def calc_ci_bound_indices(self, n_sets=None, alpha=None):
n_sets = n_sets or self.mcbs_nsets
alpha = alpha or self.mcbs_alpha
return calc_ci_bound_indices(n_sets, alpha)
ciinfo_dtype = np.dtype([('expectation', np.float64), ('ci_lower', np.float64), ('ci_upper', np.float64)])
def calc_mcbs_nsets(alpha):
'''Return a bootstrap data set size appropriate for the given confidence level.'''
return int(10 ** (math.ceil(-math.log10(alpha)) + 1))
def calc_ci_bound_indices(n_sets, alpha):
return (int(math.floor(n_sets * alpha / 2)), int(math.ceil(n_sets * (1 - alpha / 2))))
def bootstrap_ci_ll(estimator, data, alpha, n_sets, storage, sort, eargs=(), ekwargs={}, fhat=None):
'''Low-level routine for calculating bootstrap error estimates. Arguments and return values are as those for
``bootstrap_ci``, except that no argument is optional except additional arguments for the estimator (``eargs``, ``ekwargs``).
``data`` must be an array (or subclass), and an additional array ``storage`` must be provided, which
must be appropriately shaped and typed to hold ``n_sets`` results from ``estimator``. Further, if the
value ``fhat`` of the estimator must be pre-calculated to allocate ``storage``, then its value may be
    passed; otherwise, ``estimator(data, *eargs, **ekwargs)`` will be called to calculate it.'''
if fhat is None:
fhat = estimator(data, *eargs, **ekwargs)
dlen = len(data)
for iset in range(n_sets):
indices = np.random.randint(dlen, size=(dlen,))
storage[iset] = estimator(data[indices], *eargs, **ekwargs)
synth_sorted = sort(storage)
lbi = int(math.floor(n_sets * alpha / 2))
ubi = int(math.ceil(n_sets * (1 - alpha / 2)))
lb = synth_sorted[lbi]
ub = synth_sorted[ubi]
try:
return (fhat, lb, ub, ub - lb, abs((ub - lb) / fhat) if fhat else 0, max(ub - fhat, fhat - lb))
finally:
del fhat, lb, ub, indices
def bootstrap_ci(estimator, data, alpha, n_sets=None, sort=np.msort, eargs=(), ekwargs={}):
'''Perform a Monte Carlo bootstrap of a (1-alpha) confidence interval for the given ``estimator``.
Returns (fhat, ci_lower, ci_upper), where fhat is the result of ``estimator(data, *eargs, **ekwargs)``,
and ``ci_lower`` and ``ci_upper`` are the lower and upper bounds of the surrounding confidence
interval, calculated by calling ``estimator(syndata, *eargs, **ekwargs)`` on each synthetic data
set ``syndata``. If ``n_sets`` is provided, that is the number of synthetic data sets generated,
otherwise an appropriate size is selected automatically (see ``calc_mcbs_nsets()``).
``sort``, if given, is applied to sort the results of calling ``estimator`` on each
synthetic data set prior to obtaining the confidence interval. This function must sort
on the last index.
Individual entries in synthetic data sets are selected by the first index of ``data``, allowing this
function to be used on arrays of multidimensional data.
Returns (fhat, lb, ub, ub-lb, abs((ub-lb)/fhat), and max(ub-fhat,fhat-lb)) (that is, the estimated value, the
lower and upper bounds of the confidence interval, the width of the confidence interval, the relative
width of the confidence interval, and the symmetrized error bar of the confidence interval).'''
data = np.asanyarray(data)
fhat = np.squeeze(estimator(data, *eargs, **ekwargs))
n_sets = n_sets or calc_mcbs_nsets(alpha)
fsynth = np.empty((n_sets,), dtype=fhat.dtype)
try:
return bootstrap_ci_ll(estimator, data, alpha, n_sets or calc_mcbs_nsets(alpha), fsynth, sort, eargs, ekwargs, fhat)
finally:
del fsynth
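# Hedged usage sketch (illustrative only; not part of the WESTPA module):
# bootstrap a 95% confidence interval for the mean of a synthetic data set.
if __name__ == '__main__':
    data = np.random.normal(loc=1.0, scale=0.5, size=200)
    fhat, lb, ub, width, rel_width, sym_err = bootstrap_ci(np.mean, data, alpha=0.05)
    print('mean = {:.3f}, 95% CI = [{:.3f}, {:.3f}]'.format(float(fhat), float(lb), float(ub)))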
|
cfebcb99b3aa558075648b07af2bdaa949c5ff73
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level01.약수의_개수와_덧셈/Go-yj.py
|
3479dc5ab3922effd8ed14a0dd378c6e7af4ca21
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
Go-yj.py
|
'''
Link: https://programmers.co.kr/learn/courses/30/lessons/77884
Problem: Number of divisors and addition
A number has an odd count of divisors exactly when its square root is an integer,
so we check that to decide whether to subtract (odd) or add (even) each number.
'''
import math
def solution(left, right):
answer = 0
for i in range(left, right+1) :
num = math.sqrt(i)
if int(num) == num :
answer -= i
else : answer += i
return answer
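# Quick sanity check (added for illustration): perfect squares such as 1, 4 and 9
# have an odd number of divisors, so they are subtracted; everything else is added.
if __name__ == '__main__':
    assert solution(13, 17) == 43  # 13 + 14 + 15 - 16 + 17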
|
7fdd313d94c60f4d8e34e1edf26e2666a0917f48
|
d38ab28cf6ee680b5a82f37e7841d31617750da4
|
/Examples/ImageGridManipulation/ImageGridManipulation.py
|
d537a1f9ca36e7a85f5d12720647755d925514e9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SimpleITK/SimpleITK
|
cdd9f417acc7f7fe20b006a75dc483d6bb6d9b20
|
cfb40ba1149ba9f186793ccdd206f7179c8ba7a3
|
refs/heads/master
| 2023-09-01T15:01:04.024343
| 2023-08-31T19:09:36
| 2023-08-31T19:09:36
| 1,069,177
| 764
| 216
|
Apache-2.0
| 2023-09-13T17:48:23
| 2010-11-10T18:56:04
|
SWIG
|
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
ImageGridManipulation.py
|
#!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import SimpleITK as sitk
import sys
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <input-1> <input-2>")
sys.exit(1)
# Two vector images of same pixel type and dimension expected
image_1 = sitk.ReadImage(sys.argv[1])
image_2 = sitk.ReadImage(sys.argv[2])
# Join two N-D Vector images to form an (N+1)-D image
join = sitk.JoinSeriesImageFilter()
joined_image = join.Execute(image_1, image_2)
# Extract first three channels of joined image (assuming RGB)
select = sitk.VectorIndexSelectionCastImageFilter()
channel1_image = select.Execute(joined_image, 0, sitk.sitkUInt8)
channel2_image = select.Execute(joined_image, 1, sitk.sitkUInt8)
channel3_image = select.Execute(joined_image, 2, sitk.sitkUInt8)
# Recompose image (should be same as joined_image)
compose = sitk.ComposeImageFilter()
composed_image = compose.Execute(
channel1_image, channel2_image, channel3_image
)
# Select same subregion using image slicing operator
sliced_image = composed_image[100:400, 100:400, 0]
# Select same subregion using ExtractImageFilter
extract = sitk.ExtractImageFilter()
extract.SetSize([300, 300, 0])
extract.SetIndex([100, 100, 0])
extracted_image = extract.Execute(composed_image)
# Select same sub-region using CropImageFilter (NOTE: CropImageFilter cannot
# reduce dimensions unlike ExtractImageFilter, so cropped_image is a three
# dimensional image with depth of 1)
crop = sitk.CropImageFilter()
crop.SetLowerBoundaryCropSize([100, 100, 0])
crop.SetUpperBoundaryCropSize(
[composed_image.GetWidth() - 400, composed_image.GetHeight() - 400, 1]
)
cropped_image = crop.Execute(composed_image)
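# Illustrative check (added; assumes the inputs are 2-D vector images as this
# script expects): indexing the third dimension with 0 (slicing) or giving it a
# size of 0 (ExtractImageFilter) collapses it to a 2-D image, while
# CropImageFilter keeps a 3-D image of depth 1.
print("sliced size:   ", sliced_image.GetSize())     # e.g. (300, 300)
print("extracted size:", extracted_image.GetSize())  # e.g. (300, 300)
print("cropped size:  ", cropped_image.GetSize())    # e.g. (300, 300, 1)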
|
54a8f5b8fc283c3033e644e4e1e914be459184b7
|
e993a7972529f60210d9dd6d7c4097c62c37bcdf
|
/eval/unconstrained/metrics/kid.py
|
f56c63f34953cdd8304711d8b88c8009a1bc19fc
|
[
"MIT"
] |
permissive
|
GuyTevet/motion-diffusion-model
|
64756013105a80ea2a3180a73ac86519b361e53b
|
8139dda55d90a58aa5a257ebf159b2ecfb78c632
|
refs/heads/main
| 2023-09-01T05:00:14.156745
| 2023-06-06T23:42:33
| 2023-06-06T23:42:33
| 543,082,997
| 2,302
| 265
|
MIT
| 2023-08-29T09:27:54
| 2022-09-29T11:24:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,848
|
py
|
kid.py
|
import torch
import numpy as np
from tqdm import tqdm
from sklearn.metrics.pairwise import polynomial_kernel
import sys
# from: https://github.com/abdulfatir/gan-metrics-pytorch/blob/master/kid_score.py
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=1000,
ret_var=True, output=sys.stdout, **kernel_args):
m = min(codes_g.shape[0], codes_r.shape[0])
mmds = np.zeros(n_subsets)
if ret_var:
vars = np.zeros(n_subsets)
choice = np.random.choice
replace = subset_size < len(codes_g)
with tqdm(range(n_subsets), desc='MMD', file=output, disable=True) as bar:
for i in bar:
g = codes_g[choice(len(codes_g), subset_size, replace=replace)]
r = codes_r[choice(len(codes_r), subset_size, replace=replace)]
o = polynomial_mmd(g, r, **kernel_args, var_at_m=m, ret_var=ret_var)
if ret_var:
mmds[i], vars[i] = o
else:
mmds[i] = o
bar.set_postfix({'mean': mmds[:i+1].mean()})
return (mmds, vars) if ret_var else mmds
def polynomial_mmd(codes_g, codes_r, degree=3, gamma=None, coef0=1,
var_at_m=None, ret_var=True):
# use k(x, y) = (gamma <x, y> + coef0)^degree
# default gamma is 1 / dim
X = codes_g
Y = codes_r
K_XX = polynomial_kernel(X, degree=degree, gamma=gamma, coef0=coef0)
K_YY = polynomial_kernel(Y, degree=degree, gamma=gamma, coef0=coef0)
K_XY = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
return _mmd2_and_variance(K_XX, K_XY, K_YY,
var_at_m=var_at_m, ret_var=ret_var)
def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
mmd_est='unbiased', block_size=1024,
var_at_m=None, ret_var=True):
# based on
# https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
# but changed to not compute the full kernel matrix at once
m = K_XX.shape[0]
assert K_XX.shape == (m, m)
assert K_XY.shape == (m, m)
assert K_YY.shape == (m, m)
if var_at_m is None:
var_at_m = m
# Get the various sums of kernels that we'll use
# Kts drop the diagonal, but we don't need to compute them explicitly
if unit_diagonal:
diag_X = diag_Y = 1
sum_diag_X = sum_diag_Y = m
sum_diag2_X = sum_diag2_Y = m
else:
diag_X = np.diagonal(K_XX)
diag_Y = np.diagonal(K_YY)
sum_diag_X = diag_X.sum()
sum_diag_Y = diag_Y.sum()
sum_diag2_X = _sqn(diag_X)
sum_diag2_Y = _sqn(diag_Y)
Kt_XX_sums = K_XX.sum(axis=1) - diag_X
Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
K_XY_sums_0 = K_XY.sum(axis=0)
K_XY_sums_1 = K_XY.sum(axis=1)
Kt_XX_sum = Kt_XX_sums.sum()
Kt_YY_sum = Kt_YY_sums.sum()
K_XY_sum = K_XY_sums_0.sum()
if mmd_est == 'biased':
mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
+ (Kt_YY_sum + sum_diag_Y) / (m * m)
- 2 * K_XY_sum / (m * m))
else:
assert mmd_est in {'unbiased', 'u-statistic'}
mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
if mmd_est == 'unbiased':
mmd2 -= 2 * K_XY_sum / (m * m)
else:
mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))
if not ret_var:
return mmd2
Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
K_XY_2_sum = _sqn(K_XY)
dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)
m1 = m - 1
m2 = m - 2
zeta1_est = (
1 / (m * m1 * m2) * (
_sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
- 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
+ 1 / (m * m * m1) * (
_sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
- 2 / m**4 * K_XY_sum**2
- 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
+ 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
)
zeta2_est = (
1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
- 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
+ 2 / (m * m) * K_XY_2_sum
- 2 / m**4 * K_XY_sum**2
- 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
+ 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
)
var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
+ 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)
return mmd2, var_est
def _sqn(arr):
flat = np.ravel(arr)
return flat.dot(flat)
def calculate_kid(real_activations, generated_activations):
kid_values = polynomial_mmd_averages(real_activations, generated_activations, n_subsets=100)
results = (kid_values[0].mean(), kid_values[0].std())
return results
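# Hedged usage sketch (illustrative only; array shapes are arbitrary): KID compares
# two sets of feature activations, so near-identical distributions should give an
# MMD^2 estimate close to zero.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    real_feats = rng.randn(1200, 64)
    gen_feats = rng.randn(1200, 64) + 0.05
    kid_mean, kid_std = calculate_kid(real_feats, gen_feats)
    print(f"KID = {kid_mean:.5f} +/- {kid_std:.5f}")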
|
313e30746c37d0459b985c33f4987d28655ac4fb
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/webdriver/tests/classic/get_element_rect/get.py
|
942f119f43c9bb30eaefd52f3c793d7e47cfbfc4
|
[
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
get.py
|
import pytest
from webdriver import Element
from tests.support.asserts import assert_error, assert_success
from tests.support.helpers import element_rect
def get_element_rect(session, element_id):
return session.transport.send(
"GET",
"session/{session_id}/element/{element_id}/rect".format(
session_id=session.session_id,
element_id=element_id,
)
)
def test_no_top_browsing_context(session, closed_window):
original_handle, element = closed_window
response = get_element_rect(session, element.id)
assert_error(response, "no such window")
response = get_element_rect(session, "foo")
assert_error(response, "no such window")
session.window_handle = original_handle
response = get_element_rect(session, element.id)
assert_error(response, "no such element")
def test_no_browsing_context(session, closed_frame):
response = get_element_rect(session, "foo")
assert_error(response, "no such window")
def test_no_such_element_with_invalid_value(session):
element = Element(session, "foo")
response = get_element_rect(session, element.id)
assert_error(response, "no such element")
def test_no_such_element_with_shadow_root(session, get_test_page):
session.url = get_test_page()
element = session.find.css("custom-element", all=False)
result = get_element_rect(session, element.shadow_root.id)
assert_error(result, "no such element")
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_window_handle(session, inline, closed):
session.url = inline("<div id='parent'><p/>")
element = session.find.css("#parent", all=False)
new_handle = session.new_window()
if closed:
session.window.close()
session.window_handle = new_handle
response = get_element_rect(session, element.id)
assert_error(response, "no such element")
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_frame(session, get_test_page, closed):
session.url = get_test_page(as_frame=True)
frame = session.find.css("iframe", all=False)
session.switch_frame(frame)
element = session.find.css("div", all=False)
session.switch_frame("parent")
if closed:
session.execute_script("arguments[0].remove();", args=[frame])
response = get_element_rect(session, element.id)
assert_error(response, "no such element")
@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
element = stale_element("input#text", as_frame=as_frame)
result = get_element_rect(session, element.id)
assert_error(result, "stale element reference")
def test_basic(session, inline):
session.url = inline("<input>")
element = session.find.css("input", all=False)
result = get_element_rect(session, element.id)
assert_success(result, element_rect(session, element))
|
8a8a2906b10bad5605bd1c252388930653ad978a
|
55f6a9b8f90ae308a90739fd8f77f4e7cd10ff19
|
/spacy/tests/test_language.py
|
51eec32399c192e15ea012c0cd8451633fcdd0fd
|
[
"MIT"
] |
permissive
|
explosion/spaCy
|
cce07ee403aa398de7ba8941a2c11d22aea68021
|
3e4264899c3b12f8eabc5cd700146177a34824d0
|
refs/heads/master
| 2023-08-31T07:18:13.598768
| 2023-08-30T09:58:14
| 2023-08-30T09:58:14
| 21,467,110
| 26,348
| 4,983
|
MIT
| 2023-09-13T17:56:22
| 2014-07-03T15:15:40
|
Python
|
UTF-8
|
Python
| false
| false
| 26,993
|
py
|
test_language.py
|
import itertools
import logging
from unittest import mock
import pytest
from thinc.api import CupyOps, NumpyOps, get_current_ops
import spacy
from spacy.lang.de import German
from spacy.lang.en import English
from spacy.language import Language
from spacy.scorer import Scorer
from spacy.tokens import Doc, Span
from spacy.training import Example
from spacy.util import find_matching_language, ignore_error, raise_error, registry
from spacy.vocab import Vocab
from .util import add_vecs_to_vocab, assert_docs_equal
try:
import torch
# Ensure that we don't deadlock in multiprocessing tests.
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
except ImportError:
pass
def evil_component(doc):
if "2" in doc.text:
raise ValueError("no dice")
return doc
def perhaps_set_sentences(doc):
if not doc.text.startswith("4"):
doc[-1].is_sent_start = True
return doc
def assert_sents_error(doc):
if not doc.has_annotation("SENT_START"):
raise ValueError("no sents")
return doc
def warn_error(proc_name, proc, docs, e):
logger = logging.getLogger("spacy")
logger.warning("Trouble with component %s.", proc_name)
@pytest.fixture
def nlp():
nlp = Language(Vocab())
textcat = nlp.add_pipe("textcat")
for label in ("POSITIVE", "NEGATIVE"):
textcat.add_label(label)
nlp.initialize()
return nlp
def test_language_update(nlp):
text = "hello world"
annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}
wrongkeyannots = {"LABEL": True}
doc = Doc(nlp.vocab, words=text.split(" "))
example = Example.from_dict(doc, annots)
nlp.update([example])
# Not allowed to call with just one Example
with pytest.raises(TypeError):
nlp.update(example)
# Update with text and dict: not supported anymore since v.3
with pytest.raises(TypeError):
nlp.update((text, annots))
# Update with doc object and dict
with pytest.raises(TypeError):
nlp.update((doc, annots))
# Create examples badly
with pytest.raises(ValueError):
example = Example.from_dict(doc, None)
with pytest.raises(KeyError):
example = Example.from_dict(doc, wrongkeyannots)
def test_language_evaluate(nlp):
text = "hello world"
annots = {"doc_annotation": {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}}
doc = Doc(nlp.vocab, words=text.split(" "))
example = Example.from_dict(doc, annots)
scores = nlp.evaluate([example])
assert scores["speed"] > 0
# test with generator
scores = nlp.evaluate(eg for eg in [example])
assert scores["speed"] > 0
# Not allowed to call with just one Example
with pytest.raises(TypeError):
nlp.evaluate(example)
# Evaluate with text and dict: not supported anymore since v.3
with pytest.raises(TypeError):
nlp.evaluate([(text, annots)])
# Evaluate with doc object and dict
with pytest.raises(TypeError):
nlp.evaluate([(doc, annots)])
with pytest.raises(TypeError):
nlp.evaluate([text, annots])
def test_evaluate_no_pipe(nlp):
"""Test that docs are processed correctly within Language.pipe if the
component doesn't expose a .pipe method."""
@Language.component("test_evaluate_no_pipe")
def pipe(doc):
return doc
text = "hello world"
annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}
nlp = Language(Vocab())
doc = nlp(text)
nlp.add_pipe("test_evaluate_no_pipe")
nlp.evaluate([Example.from_dict(doc, annots)])
def test_evaluate_textcat_multilabel(en_vocab):
"""Test that evaluate works with a multilabel textcat pipe."""
nlp = Language(en_vocab)
textcat_multilabel = nlp.add_pipe("textcat_multilabel")
for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"):
textcat_multilabel.add_label(label)
nlp.initialize()
annots = {"cats": {"FEATURE": 1.0, "QUESTION": 1.0}}
doc = nlp.make_doc("hello world")
example = Example.from_dict(doc, annots)
scores = nlp.evaluate([example])
labels = nlp.get_pipe("textcat_multilabel").labels
for label in labels:
assert scores["cats_f_per_type"].get(label) is not None
for key in example.reference.cats.keys():
if key not in labels:
assert scores["cats_f_per_type"].get(key) is None
def test_evaluate_multiple_textcat_final(en_vocab):
"""Test that evaluate evaluates the final textcat component in a pipeline
with more than one textcat or textcat_multilabel."""
nlp = Language(en_vocab)
textcat = nlp.add_pipe("textcat")
for label in ("POSITIVE", "NEGATIVE"):
textcat.add_label(label)
textcat_multilabel = nlp.add_pipe("textcat_multilabel")
for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"):
textcat_multilabel.add_label(label)
nlp.initialize()
    annots = {
        "cats": {
            "POSITIVE": 1.0,
            "NEGATIVE": 0.0,
            "FEATURE": 1.0,
            "QUESTION": 1.0,
        }
    }
doc = nlp.make_doc("hello world")
example = Example.from_dict(doc, annots)
scores = nlp.evaluate([example])
# get the labels from the final pipe
labels = nlp.get_pipe(nlp.pipe_names[-1]).labels
for label in labels:
assert scores["cats_f_per_type"].get(label) is not None
for key in example.reference.cats.keys():
if key not in labels:
assert scores["cats_f_per_type"].get(key) is None
def test_evaluate_multiple_textcat_separate(en_vocab):
"""Test that evaluate can evaluate multiple textcat components separately
with custom scorers."""
def custom_textcat_score(examples, **kwargs):
scores = Scorer.score_cats(
examples,
"cats",
multi_label=False,
**kwargs,
)
return {f"custom_{k}": v for k, v in scores.items()}
@spacy.registry.scorers("test_custom_textcat_scorer")
def make_custom_textcat_scorer():
return custom_textcat_score
nlp = Language(en_vocab)
textcat = nlp.add_pipe(
"textcat",
config={"scorer": {"@scorers": "test_custom_textcat_scorer"}},
)
for label in ("POSITIVE", "NEGATIVE"):
textcat.add_label(label)
textcat_multilabel = nlp.add_pipe("textcat_multilabel")
for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"):
textcat_multilabel.add_label(label)
nlp.initialize()
    annots = {
        "cats": {
            "POSITIVE": 1.0,
            "NEGATIVE": 0.0,
            "FEATURE": 1.0,
            "QUESTION": 1.0,
        }
    }
doc = nlp.make_doc("hello world")
example = Example.from_dict(doc, annots)
scores = nlp.evaluate([example])
# check custom scores for the textcat pipe
assert "custom_cats_f_per_type" in scores
labels = nlp.get_pipe("textcat").labels
assert set(scores["custom_cats_f_per_type"].keys()) == set(labels)
# check default scores for the textcat_multilabel pipe
assert "cats_f_per_type" in scores
labels = nlp.get_pipe("textcat_multilabel").labels
assert set(scores["cats_f_per_type"].keys()) == set(labels)
def vector_modification_pipe(doc):
doc.vector += 1
return doc
def userdata_pipe(doc):
doc.user_data["foo"] = "bar"
return doc
def ner_pipe(doc):
span = Span(doc, 0, 1, label="FIRST")
doc.ents += (span,)
return doc
@pytest.fixture
def sample_vectors():
return [
("spacy", [-0.1, -0.2, -0.3]),
("world", [-0.2, -0.3, -0.4]),
("pipe", [0.7, 0.8, 0.9]),
]
@pytest.fixture
def nlp2(nlp, sample_vectors):
Language.component(
"test_language_vector_modification_pipe", func=vector_modification_pipe
)
Language.component("test_language_userdata_pipe", func=userdata_pipe)
Language.component("test_language_ner_pipe", func=ner_pipe)
add_vecs_to_vocab(nlp.vocab, sample_vectors)
nlp.add_pipe("test_language_vector_modification_pipe")
nlp.add_pipe("test_language_ner_pipe")
nlp.add_pipe("test_language_userdata_pipe")
return nlp
@pytest.fixture
def texts():
data = [
"Hello world.",
"This is spacy.",
"You can use multiprocessing with pipe method.",
"Please try!",
]
return data
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe(nlp2, n_process, texts):
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
texts = texts * 10
expecteds = [nlp2(text) for text in texts]
docs = nlp2.pipe(texts, n_process=n_process, batch_size=2)
for doc, expected_doc in zip(docs, expecteds):
assert_docs_equal(doc, expected_doc)
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_stream(nlp2, n_process, texts):
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
# check if nlp.pipe can handle infinite length iterator properly.
stream_texts = itertools.cycle(texts)
texts0, texts1 = itertools.tee(stream_texts)
expecteds = (nlp2(text) for text in texts0)
docs = nlp2.pipe(texts1, n_process=n_process, batch_size=2)
n_fetch = 20
for doc, expected_doc in itertools.islice(zip(docs, expecteds), n_fetch):
assert_docs_equal(doc, expected_doc)
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler(n_process):
"""Test that the error handling of nlp.pipe works well"""
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
nlp = English()
nlp.add_pipe("merge_subtokens")
nlp.initialize()
texts = ["Curious to see what will happen to this text.", "And this one."]
# the pipeline fails because there's no parser
with pytest.raises(ValueError):
nlp(texts[0])
with pytest.raises(ValueError):
list(nlp.pipe(texts, n_process=n_process))
nlp.set_error_handler(raise_error)
with pytest.raises(ValueError):
list(nlp.pipe(texts, n_process=n_process))
        # set explicitly to ignore errors
nlp.set_error_handler(ignore_error)
docs = list(nlp.pipe(texts, n_process=n_process))
assert len(docs) == 0
nlp(texts[0])
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler_custom(en_vocab, n_process):
"""Test the error handling of a custom component that has no pipe method"""
Language.component("my_evil_component", func=evil_component)
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
nlp = English()
nlp.add_pipe("my_evil_component")
texts = ["TEXT 111", "TEXT 222", "TEXT 333", "TEXT 342", "TEXT 666"]
with pytest.raises(ValueError):
# the evil custom component throws an error
list(nlp.pipe(texts))
nlp.set_error_handler(warn_error)
logger = logging.getLogger("spacy")
with mock.patch.object(logger, "warning") as mock_warning:
# the errors by the evil custom component raise a warning for each
# bad doc
docs = list(nlp.pipe(texts, n_process=n_process))
# HACK/TODO? the warnings in child processes don't seem to be
# detected by the mock logger
if n_process == 1:
mock_warning.assert_called()
assert mock_warning.call_count == 2
assert len(docs) + mock_warning.call_count == len(texts)
assert [doc.text for doc in docs] == ["TEXT 111", "TEXT 333", "TEXT 666"]
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler_input_as_tuples(en_vocab, n_process):
"""Test the error handling of nlp.pipe with input as tuples"""
Language.component("my_evil_component", func=evil_component)
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
nlp = English()
nlp.add_pipe("my_evil_component")
texts = [
("TEXT 111", 111),
("TEXT 222", 222),
("TEXT 333", 333),
("TEXT 342", 342),
("TEXT 666", 666),
]
with pytest.raises(ValueError):
list(nlp.pipe(texts, as_tuples=True))
nlp.set_error_handler(warn_error)
logger = logging.getLogger("spacy")
with mock.patch.object(logger, "warning") as mock_warning:
tuples = list(nlp.pipe(texts, as_tuples=True, n_process=n_process))
# HACK/TODO? the warnings in child processes don't seem to be
# detected by the mock logger
if n_process == 1:
mock_warning.assert_called()
assert mock_warning.call_count == 2
assert len(tuples) + mock_warning.call_count == len(texts)
assert (tuples[0][0].text, tuples[0][1]) == ("TEXT 111", 111)
assert (tuples[1][0].text, tuples[1][1]) == ("TEXT 333", 333)
assert (tuples[2][0].text, tuples[2][1]) == ("TEXT 666", 666)
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler_pipe(en_vocab, n_process):
"""Test the error handling of a component's pipe method"""
Language.component("my_perhaps_sentences", func=perhaps_set_sentences)
Language.component("assert_sents_error", func=assert_sents_error)
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
texts = [f"{str(i)} is enough. Done" for i in range(100)]
nlp = English()
nlp.add_pipe("my_perhaps_sentences")
nlp.add_pipe("assert_sents_error")
nlp.initialize()
with pytest.raises(ValueError):
# assert_sents_error requires sentence boundaries, will throw an error otherwise
docs = list(nlp.pipe(texts, n_process=n_process, batch_size=10))
nlp.set_error_handler(ignore_error)
docs = list(nlp.pipe(texts, n_process=n_process, batch_size=10))
        # we lose/ignore the 11 failing docs (texts starting with "4": 4 and 40-49)
assert len(docs) == 89
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler_make_doc_actual(n_process):
"""Test the error handling for make_doc"""
# TODO: fix so that the following test is the actual behavior
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
nlp = English()
nlp.max_length = 10
texts = ["12345678901234567890", "12345"] * 10
with pytest.raises(ValueError):
list(nlp.pipe(texts, n_process=n_process))
nlp.default_error_handler = ignore_error
if n_process == 1:
with pytest.raises(ValueError):
list(nlp.pipe(texts, n_process=n_process))
else:
docs = list(nlp.pipe(texts, n_process=n_process))
assert len(docs) == 0
@pytest.mark.xfail
@pytest.mark.parametrize("n_process", [1, 2])
def test_language_pipe_error_handler_make_doc_preferred(n_process):
"""Test the error handling for make_doc"""
ops = get_current_ops()
if isinstance(ops, NumpyOps) or n_process < 2:
nlp = English()
nlp.max_length = 10
texts = ["12345678901234567890", "12345"] * 10
with pytest.raises(ValueError):
list(nlp.pipe(texts, n_process=n_process))
nlp.default_error_handler = ignore_error
docs = list(nlp.pipe(texts, n_process=n_process))
assert len(docs) == 0
def test_language_from_config_before_after_init():
name = "test_language_from_config_before_after_init"
ran_before = False
ran_after = False
ran_after_pipeline = False
ran_before_init = False
ran_after_init = False
@registry.callbacks(f"{name}_before")
def make_before_creation():
def before_creation(lang_cls):
nonlocal ran_before
ran_before = True
assert lang_cls is English
lang_cls.Defaults.foo = "bar"
return lang_cls
return before_creation
@registry.callbacks(f"{name}_after")
def make_after_creation():
def after_creation(nlp):
nonlocal ran_after
ran_after = True
assert isinstance(nlp, English)
assert nlp.pipe_names == []
assert nlp.Defaults.foo == "bar"
nlp.meta["foo"] = "bar"
return nlp
return after_creation
@registry.callbacks(f"{name}_after_pipeline")
def make_after_pipeline_creation():
def after_pipeline_creation(nlp):
nonlocal ran_after_pipeline
ran_after_pipeline = True
assert isinstance(nlp, English)
assert nlp.pipe_names == ["sentencizer"]
assert nlp.Defaults.foo == "bar"
assert nlp.meta["foo"] == "bar"
nlp.meta["bar"] = "baz"
return nlp
return after_pipeline_creation
@registry.callbacks(f"{name}_before_init")
def make_before_init():
def before_init(nlp):
nonlocal ran_before_init
ran_before_init = True
nlp.meta["before_init"] = "before"
return nlp
return before_init
@registry.callbacks(f"{name}_after_init")
def make_after_init():
def after_init(nlp):
nonlocal ran_after_init
ran_after_init = True
nlp.meta["after_init"] = "after"
return nlp
return after_init
config = {
"nlp": {
"pipeline": ["sentencizer"],
"before_creation": {"@callbacks": f"{name}_before"},
"after_creation": {"@callbacks": f"{name}_after"},
"after_pipeline_creation": {"@callbacks": f"{name}_after_pipeline"},
},
"components": {"sentencizer": {"factory": "sentencizer"}},
"initialize": {
"before_init": {"@callbacks": f"{name}_before_init"},
"after_init": {"@callbacks": f"{name}_after_init"},
},
}
nlp = English.from_config(config)
assert nlp.Defaults.foo == "bar"
assert nlp.meta["foo"] == "bar"
assert nlp.meta["bar"] == "baz"
assert "before_init" not in nlp.meta
assert "after_init" not in nlp.meta
assert nlp.pipe_names == ["sentencizer"]
assert nlp("text")
nlp.initialize()
assert nlp.meta["before_init"] == "before"
assert nlp.meta["after_init"] == "after"
assert all(
[ran_before, ran_after, ran_after_pipeline, ran_before_init, ran_after_init]
)
def test_language_from_config_before_after_init_invalid():
"""Check that an error is raised if function doesn't return nlp."""
name = "test_language_from_config_before_after_init_invalid"
registry.callbacks(f"{name}_before1", func=lambda: lambda nlp: None)
registry.callbacks(f"{name}_before2", func=lambda: lambda nlp: nlp())
registry.callbacks(f"{name}_after1", func=lambda: lambda nlp: None)
registry.callbacks(f"{name}_after1", func=lambda: lambda nlp: English)
for callback_name in [f"{name}_before1", f"{name}_before2"]:
config = {"nlp": {"before_creation": {"@callbacks": callback_name}}}
with pytest.raises(ValueError):
English.from_config(config)
for callback_name in [f"{name}_after1", f"{name}_after2"]:
config = {"nlp": {"after_creation": {"@callbacks": callback_name}}}
with pytest.raises(ValueError):
English.from_config(config)
for callback_name in [f"{name}_after1", f"{name}_after2"]:
config = {"nlp": {"after_pipeline_creation": {"@callbacks": callback_name}}}
with pytest.raises(ValueError):
English.from_config(config)
def test_language_whitespace_tokenizer():
"""Test the custom whitespace tokenizer from the docs."""
class WhitespaceTokenizer:
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split(" ")
spaces = [True] * len(words)
# Avoid zero-length tokens
for i, word in enumerate(words):
if word == "":
words[i] = " "
spaces[i] = False
# Remove the final trailing space
if words[-1] == " ":
words = words[0:-1]
spaces = spaces[0:-1]
else:
spaces[-1] = False
return Doc(self.vocab, words=words, spaces=spaces)
nlp = spacy.blank("en")
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
text = " What's happened to me? he thought. It wasn't a dream. "
doc = nlp(text)
assert doc.text == text
def test_language_custom_tokenizer():
"""Test that a fully custom tokenizer can be plugged in via the registry."""
name = "test_language_custom_tokenizer"
class CustomTokenizer:
"""Dummy "tokenizer" that splits on spaces and adds prefix to each word."""
def __init__(self, nlp, prefix):
self.vocab = nlp.vocab
self.prefix = prefix
def __call__(self, text):
words = [f"{self.prefix}{word}" for word in text.split(" ")]
return Doc(self.vocab, words=words)
@registry.tokenizers(name)
def custom_create_tokenizer(prefix: str = "_"):
def create_tokenizer(nlp):
return CustomTokenizer(nlp, prefix=prefix)
return create_tokenizer
config = {"nlp": {"tokenizer": {"@tokenizers": name}}}
nlp = English.from_config(config)
doc = nlp("hello world")
assert [t.text for t in doc] == ["_hello", "_world"]
doc = list(nlp.pipe(["hello world"]))[0]
assert [t.text for t in doc] == ["_hello", "_world"]
def test_language_from_config_invalid_lang():
"""Test that calling Language.from_config raises an error and lang defined
in config needs to match language-specific subclasses."""
config = {"nlp": {"lang": "en"}}
with pytest.raises(ValueError):
Language.from_config(config)
with pytest.raises(ValueError):
German.from_config(config)
def test_spacy_blank():
nlp = spacy.blank("en")
assert nlp.config["training"]["dropout"] == 0.1
config = {"training": {"dropout": 0.2}}
meta = {"name": "my_custom_model"}
nlp = spacy.blank("en", config=config, meta=meta)
assert nlp.config["training"]["dropout"] == 0.2
assert nlp.meta["name"] == "my_custom_model"
@pytest.mark.parametrize(
"lang,target",
[
("en", "en"),
("fra", "fr"),
("fre", "fr"),
("iw", "he"),
("mo", "ro"),
("mul", "xx"),
("no", "nb"),
("pt-BR", "pt"),
("xx", "xx"),
("zh-Hans", "zh"),
("zh-Hant", None),
("zxx", None),
],
)
def test_language_matching(lang, target):
"""
Test that we can look up languages by equivalent or nearly-equivalent
language codes.
"""
assert find_matching_language(lang) == target
@pytest.mark.parametrize(
"lang,target",
[
("en", "en"),
("fra", "fr"),
("fre", "fr"),
("iw", "he"),
("mo", "ro"),
("mul", "xx"),
("no", "nb"),
("pt-BR", "pt"),
("xx", "xx"),
("zh-Hans", "zh"),
],
)
def test_blank_languages(lang, target):
"""
Test that we can get spacy.blank in various languages, including codes
that are defined to be equivalent or that match by CLDR language matching.
"""
nlp = spacy.blank(lang)
assert nlp.lang == target
@pytest.mark.parametrize("value", [False, None, ["x", "y"], Language, Vocab])
def test_language_init_invalid_vocab(value):
err_fragment = "invalid value"
with pytest.raises(ValueError) as e:
Language(value)
assert err_fragment in str(e.value)
def test_language_source_and_vectors(nlp2):
nlp = Language(Vocab())
textcat = nlp.add_pipe("textcat")
for label in ("POSITIVE", "NEGATIVE"):
textcat.add_label(label)
nlp.initialize()
long_string = "thisisalongstring"
assert long_string not in nlp.vocab.strings
assert long_string not in nlp2.vocab.strings
nlp.vocab.strings.add(long_string)
assert nlp.vocab.vectors.to_bytes() != nlp2.vocab.vectors.to_bytes()
vectors_bytes = nlp.vocab.vectors.to_bytes()
with pytest.warns(UserWarning):
nlp2.add_pipe("textcat", name="textcat2", source=nlp)
# strings should be added
assert long_string in nlp2.vocab.strings
# vectors should remain unmodified
assert nlp.vocab.vectors.to_bytes() == vectors_bytes
@pytest.mark.parametrize("n_process", [1, 2])
def test_pass_doc_to_pipeline(nlp, n_process):
texts = ["cats", "dogs", "guinea pigs"]
docs = [nlp.make_doc(text) for text in texts]
assert not any(len(doc.cats) for doc in docs)
doc = nlp(docs[0])
assert doc.text == texts[0]
assert len(doc.cats) > 0
if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
docs = nlp.pipe(docs, n_process=n_process)
assert [doc.text for doc in docs] == texts
assert all(len(doc.cats) for doc in docs)
def test_invalid_arg_to_pipeline(nlp):
str_list = ["This is a text.", "This is another."]
with pytest.raises(ValueError):
nlp(str_list) # type: ignore
assert len(list(nlp.pipe(str_list))) == 2
int_list = [1, 2, 3]
with pytest.raises(ValueError):
list(nlp.pipe(int_list)) # type: ignore
with pytest.raises(ValueError):
nlp(int_list) # type: ignore
@pytest.mark.skipif(
not isinstance(get_current_ops(), CupyOps), reason="test requires GPU"
)
def test_multiprocessing_gpu_warning(nlp2, texts):
texts = texts * 10
docs = nlp2.pipe(texts, n_process=2, batch_size=2)
with pytest.warns(UserWarning, match="multiprocessing with GPU models"):
with pytest.raises(ValueError):
# Trigger multi-processing.
for _ in docs:
pass
def test_dot_in_factory_names(nlp):
Language.component("my_evil_component", func=evil_component)
nlp.add_pipe("my_evil_component")
with pytest.raises(ValueError, match="not permitted"):
Language.component("my.evil.component.v1", func=evil_component)
with pytest.raises(ValueError, match="not permitted"):
Language.factory("my.evil.component.v1", func=evil_component)
def test_component_return():
"""Test that an error is raised if components return a type other than a
doc."""
nlp = English()
@Language.component("test_component_good_pipe")
def good_pipe(doc):
return doc
nlp.add_pipe("test_component_good_pipe")
nlp("text")
nlp.remove_pipe("test_component_good_pipe")
@Language.component("test_component_bad_pipe")
def bad_pipe(doc):
return doc.text
nlp.add_pipe("test_component_bad_pipe")
with pytest.raises(ValueError, match="instead of a Doc"):
nlp("text")
|
0e0b758646d77aafdf0a22db81928e6b96583db3
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/paypal/views.py
|
4f769c0b0af2eaff5bf95d39386446231d7065b2
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
views.py
|
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import PaypalProvider
class PaypalOAuth2Adapter(OAuth2Adapter):
provider_id = PaypalProvider.id
supports_state = False
@property
def authorize_url(self):
path = "webapps/auth/protocol/openidconnect/v1/authorize"
return "https://www.{0}/{1}".format(self._get_endpoint(), path)
@property
def access_token_url(self):
path = "v1/identity/openidconnect/tokenservice"
return "https://api.{0}/{1}".format(self._get_endpoint(), path)
@property
def profile_url(self):
path = "v1/identity/openidconnect/userinfo"
return "https://api.{0}/{1}".format(self._get_endpoint(), path)
def _get_endpoint(self):
settings = self.get_provider().get_settings()
if settings.get("MODE") == "live":
return "paypal.com"
else:
return "sandbox.paypal.com"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
params={"schema": "openid", "access_token": token},
)
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PaypalOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PaypalOAuth2Adapter)
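# A minimal configuration sketch (illustrative, not part of this module): the adapter reads
# its MODE setting via the provider's get_settings(), which in django-allauth is normally
# populated from SOCIALACCOUNT_PROVIDERS in the Django settings. Values below are examples.
#
#   SOCIALACCOUNT_PROVIDERS = {
#       "paypal": {"MODE": "live"},  # any other value falls back to the sandbox endpoints
#   }
#
# With MODE == "live" the adapter builds its URLs on paypal.com
# (e.g. https://api.paypal.com/v1/identity/openidconnect/userinfo); otherwise it uses
# sandbox.paypal.com.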
|
cd5e6a88168f88a8ca1d5f1555c759b787a852fe
|
3c537c49c3acde75556f3e88033a72aae4b675c2
|
/train_affectnet.py
|
6369325f9fe0b4c1bb8a3ef18ef0bd2f03ef67e7
|
[] |
no_license
|
yangyuke001/FIIQA-PyTorch
|
a121a34f48131512688bd286ec06883703521239
|
29ab6af25a4e5beeffc6709752ff983fc6e7f4a6
|
refs/heads/master
| 2023-07-08T19:04:59.210444
| 2023-06-13T01:19:04
| 2023-06-13T01:19:04
| 185,769,820
| 325
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,057
|
py
|
train_affectnet.py
|
'''Train AffectNet with PyTorch (adapted from a CK+ training script).'''
# 10 crop for data enhancement
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms,utils
from torch.utils.data import DataLoader
import transforms as transforms
import numpy as np
import os
import argparse
import utils
from CK import CK
from torch.autograd import Variable
from models import *
from ShuffleNetV2 import ShuffleNetV2
from flops_counter_pytorch.ptflops import get_model_complexity_info
from summary import model_summary
from datagen import ListDataset
train_data = '../train_val_imgs/Manually/Manually_train_croped'
test_data = '../train_val_imgs/Manually/Manually_validation_croped'
parser = argparse.ArgumentParser(description='PyTorch CK+ CNN Training')
#parser.add_argument('--model', type=str, default='VGG19', help='CNN architecture')
parser.add_argument('--dataset', type=str, default='CK+', help='dataset')
parser.add_argument('--fold', default=1, type=int, help='k fold number')
parser.add_argument('--bs', default=128, type=int, help='batch_size')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
opt = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_Test_acc = 0 # best PrivateTest accuracy
best_Test_acc_epoch = 0
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
learning_rate_decay_start = 20 # 50
learning_rate_decay_every = 1 # 5
learning_rate_decay_rate = 0.8 # 0.9
total_epoch = 500
bs = 128
input_size = 64
cut_size = input_size - 1
n_class=7
#path = os.path.join(opt.dataset + '_' + opt.model, str(opt.fold))
path = './AffectNet+ShuffleNetV2/'
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.Resize(input_size),
transforms.RandomCrop(cut_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])
transform_test = transforms.Compose([
transforms.Resize(input_size),
#transforms.RandomCrop(cut_size),
#transforms.RandomHorizontalFlip(),
transforms.TenCrop(cut_size),
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
#transforms.ToTensor(),
#transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])
#trainset=torchvision.datasets.ImageFolder(train_data,transform_train)
trainset = ListDataset(root='../train_val_imgs/Manually/Manually_train_croped/', list_file='./AffectNet/train.txt', transform=transform_train)
trainloader=DataLoader(trainset,bs,shuffle=True, num_workers=12)
#testset=torchvision.datasets.ImageFolder(test_data,transform_test)
testset = ListDataset(root='../train_val_imgs/Manually/Manually_validation_croped/', list_file='./AffectNet/val.txt', transform=transform_test)
testloader=DataLoader(testset,batch_size=128,shuffle=True, num_workers=12)
net = ShuffleNetV2(input_size,n_class)
'''
model_summary(net,input_size=(3,input_size,input_size))
flops, params = get_model_complexity_info(net, (input_size, input_size), as_strings=True, print_per_layer_stat=False)
print('Flops: ' + flops)
print('Params: ' + params)
#net = net.to(device=my_device)
'''
if opt.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(path), 'Error: no checkpoint directory found!'
checkpoint = torch.load(os.path.join(path,'ShuffleNetV2.pth'))
net.load_state_dict(checkpoint['net'])
best_Test_acc = checkpoint['best_Test_acc']
best_Test_acc_epoch = checkpoint['best_Test_acc_epoch']
start_epoch = best_Test_acc_epoch + 1
else:
print('==> Building model..')
if use_cuda:
net.cuda()
criterion = nn.CrossEntropyLoss()
import adabound  # AdaBound is provided by the third-party `adabound` package, not torch.optim
optimizer = adabound.AdaBound(net.parameters(), lr=opt.lr, final_lr=0.1)
#optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4)
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
global Train_acc
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
#inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
        utils.clip_gradient(optimizer, 0.1)  # gradient clipping limits exploding gradients so the final loss can converge to a satisfactory value
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
utils.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
Train_acc = 100.*correct/total
# test
def test(epoch):
global Test_acc
global best_Test_acc
global best_Test_acc_epoch
net.eval()
PrivateTest_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
bs, ncrops, c, h, w = np.shape(inputs)
inputs = inputs.view(-1, c, h, w)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
outputs_avg = outputs.view(bs, ncrops, -1).mean(1) # avg over crops
loss = criterion(outputs_avg, targets)
PrivateTest_loss += loss.item()
_, predicted = torch.max(outputs_avg.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
utils.progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (PrivateTest_loss / (batch_idx + 1), \
100. * correct / total, correct, total))
# Save checkpoint.
Test_acc = 100.*correct/total
if Test_acc > best_Test_acc:
print('Saving..')
print("best_Test_acc: %0.3f" % Test_acc)
state = {'net': net.state_dict() if use_cuda else net,
'best_Test_acc': Test_acc,
'best_Test_acc_epoch': epoch,
}
if not os.path.isdir(opt.dataset + '_' + 'ShuffleNetV2'):
os.mkdir(opt.dataset + '_' + 'ShuffleNetV2')
if not os.path.isdir(path):
os.mkdir(path)
#torch.save(state, os.path.join(path, str(best_Test_acc) + '_ShuffleNetV2.pth'))
best_Test_acc = Test_acc
best_Test_acc_epoch = epoch
torch.save(state, os.path.join(path, str(best_Test_acc) + '_'+str(input_size)+'_ShuffleNetV2.pth'))
for epoch in range(start_epoch, total_epoch):
train(epoch)
test(epoch)
print("best_Test_acc: %0.3f" % best_Test_acc)
print("best_Test_acc_epoch: %d" % best_Test_acc_epoch)
|
a6b9d2389e8602933ffcc95b8a31434c64c5a928
|
71c64688447767d68f680045ab6e760da2079081
|
/scripts/checkpoint_averaging.py
|
894c535ca9b1310394e06b5551c479f64793d498
|
[
"BSD-3-Clause"
] |
permissive
|
bzhangGo/zero
|
c5d347612f29b58218e82ed0c00573f7d4b67002
|
d97e2c21b1c6d0467fe821223042247bf2b46bf9
|
refs/heads/master
| 2023-05-27T09:52:09.549934
| 2023-05-08T11:04:16
| 2023-05-08T11:04:16
| 152,663,011
| 145
| 27
|
BSD-3-Clause
| 2023-05-08T11:04:17
| 2018-10-11T22:25:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,253
|
py
|
checkpoint_averaging.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import operator
import os
import numpy as np
import tensorflow as tf
def parseargs():
msg = "Average checkpoints"
usage = "average.py [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
parser.add_argument("--path", type=str, required=True,
help="checkpoint dir")
parser.add_argument("--checkpoints", type=int, required=True,
help="number of checkpoints to use")
parser.add_argument("--output", type=str, help="output path")
parser.add_argument("--gpu", type=int, default=0,
help="the default gpu device index")
return parser.parse_args()
def get_checkpoints(path):
if not tf.gfile.Exists(os.path.join(path, "checkpoint")):
raise ValueError("Cannot find checkpoints in %s" % path)
checkpoint_names = []
with tf.gfile.GFile(os.path.join(path, "checkpoint")) as fd:
# Skip the first line
fd.readline()
for line in fd:
name = line.strip().split(":")[-1].strip()[1:-1]
key = int(name.split("-")[-1])
checkpoint_names.append((key, os.path.join(path, name)))
sorted_names = sorted(checkpoint_names, key=operator.itemgetter(0),
reverse=True)
return [item[-1] for item in sorted_names]
def checkpoint_exists(path):
return (tf.gfile.Exists(path) or tf.gfile.Exists(path + ".meta") or
tf.gfile.Exists(path + ".index"))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
checkpoints = get_checkpoints(FLAGS.path)
checkpoints = checkpoints[:FLAGS.checkpoints]
if not checkpoints:
raise ValueError("No checkpoints provided for averaging.")
checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
if not checkpoints:
raise ValueError(
"None of the provided checkpoints exist. %s" % FLAGS.checkpoints
)
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if not name.startswith("global_step"):
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
tf.logging.info("Read from checkpoint %s", checkpoint)
# Average checkpoints
for name in var_values:
var_values[name] /= len(checkpoints)
tf_vars = [
tf.get_variable(name, shape=var_values[name].shape,
dtype=var_dtypes[name]) for name in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step = tf.Variable(0, name="global_step", trainable=False,
dtype=tf.int64)
saver = tf.train.Saver(tf.global_variables())
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess_config.gpu_options.visible_device_list = "%s" % FLAGS.gpu
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
        # use .items() so this also runs under Python 3 (dict.iteritems() is Python 2 only)
        for p, assign_op, (name, value) in zip(placeholders, assign_ops,
                                               var_values.items()):
sess.run(assign_op, {p: value})
saved_name = os.path.join(FLAGS.output, "average")
saver.save(sess, saved_name, global_step=global_step)
tf.logging.info("Averaged checkpoints saved in %s", saved_name)
params_pattern = os.path.join(FLAGS.path, "*.json")
params_files = tf.gfile.Glob(params_pattern)
for name in params_files:
new_name = name.replace(FLAGS.path.rstrip("/"),
FLAGS.output.rstrip("/"))
tf.gfile.Copy(name, new_name, overwrite=True)
if __name__ == "__main__":
FLAGS = parseargs()
tf.app.run()
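# Example invocation (illustrative paths; the flags match the parser defined above):
#   python checkpoint_averaging.py --path train_dir --checkpoints 5 --output avg_dir --gpu 0
# This averages the 5 most recent checkpoints listed in train_dir/checkpoint and writes the
# averaged parameters (plus copies of any *.json config files) to avg_dir.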
|
908ff09b8bc1158641137bdf1dd0a16f3460347f
|
080db1ae362de6823e7c78ab6071c82e347ce967
|
/jina/serve/runtimes/servers/__init__.py
|
195a08ed5e4f6efd24c19b65a98e191768d27d12
|
[
"Apache-2.0"
] |
permissive
|
jina-ai/jina
|
c06898dc31dd3de1f917f30305e9460efedf97c3
|
23c7b8c78fc4ad67d16d83fc0c9f0eae9e935e71
|
refs/heads/master
| 2023-08-30T23:04:45.267920
| 2023-08-24T13:49:49
| 2023-08-24T13:49:49
| 240,315,046
| 20,687
| 2,460
|
Apache-2.0
| 2023-09-14T12:30:45
| 2020-02-13T17:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 10,011
|
py
|
__init__.py
|
import abc
import time
from types import SimpleNamespace
from typing import TYPE_CHECKING, Dict, Optional, Union
from jina.logging.logger import JinaLogger
from jina.serve.instrumentation import InstrumentationMixin
from jina.serve.runtimes.monitoring import MonitoringMixin
import threading
__all__ = ['BaseServer']
if TYPE_CHECKING:
import multiprocessing
class BaseServer(MonitoringMixin, InstrumentationMixin):
"""
BaseServer class that is handled by AsyncNewLoopRuntime. It makes sure that the Request Handler is exposed via a server.
"""
def __init__(
self,
name: Optional[str] = 'gateway',
runtime_args: Optional[Dict] = None,
req_handler_cls=None,
req_handler=None,
is_cancel=None,
**kwargs,
):
self.name = name or ''
self.runtime_args = runtime_args
self.works_as_load_balancer = False
self.is_cancel = is_cancel or threading.Event()
if isinstance(runtime_args, Dict):
self.works_as_load_balancer = runtime_args.get('gateway_load_balancer', False)
if isinstance(self.runtime_args, dict):
self.logger = JinaLogger(self.name, **self.runtime_args)
else:
self.logger = JinaLogger(self.name, **vars(self.runtime_args))
self.req_handler_cls = req_handler_cls
self._request_handler = None
self.server = None
self._add_gateway_args()
self.tracing = self.runtime_args.tracing
self.tracer_provider = self.runtime_args.tracer_provider
self._setup_instrumentation(
name=self.name,
tracing=self.runtime_args.tracing,
traces_exporter_host=self.runtime_args.traces_exporter_host,
traces_exporter_port=self.runtime_args.traces_exporter_port,
metrics=self.runtime_args.metrics,
metrics_exporter_host=self.runtime_args.metrics_exporter_host,
metrics_exporter_port=self.runtime_args.metrics_exporter_port,
)
self._request_handler = req_handler or self._get_request_handler()
if hasattr(self._request_handler, 'streamer'):
self.streamer = self._request_handler.streamer # backward compatibility
self.executor = self._request_handler.executor # backward compatibility
def _teardown_instrumentation(self):
try:
if self.tracing and self.tracer_provider:
if hasattr(self.tracer_provider, 'force_flush'):
self.tracer_provider.force_flush()
if hasattr(self.tracer_provider, 'shutdown'):
self.tracer_provider.shutdown()
if self.metrics and self.meter_provider:
if hasattr(self.meter_provider, 'force_flush'):
self.meter_provider.force_flush()
if hasattr(self.meter_provider, 'shutdown'):
self.meter_provider.shutdown()
except Exception as ex:
self.logger.warning(f'Exception during instrumentation teardown, {str(ex)}')
def _get_request_handler(self):
self._setup_monitoring(
monitoring=self.runtime_args.monitoring,
port_monitoring=self.runtime_args.port_monitoring,
)
return self.req_handler_cls(
args=self.runtime_args,
logger=self.logger,
metrics_registry=self.metrics_registry,
meter_provider=self.meter_provider,
tracer_provider=self.tracer_provider,
tracer=self.tracer,
meter=self.meter,
runtime_name=self.name,
aio_tracing_client_interceptors=self.aio_tracing_client_interceptors(),
tracing_client_interceptor=self.tracing_client_interceptor(),
deployment_name=self.name.split('/')[0],
works_as_load_balancer=self.works_as_load_balancer
)
def _add_gateway_args(self):
# TODO: rename and change
from jina.parsers import set_gateway_runtime_args_parser
parser = set_gateway_runtime_args_parser()
default_args = parser.parse_args([])
default_args_dict = dict(vars(default_args))
_runtime_args = (
self.runtime_args
if isinstance(self.runtime_args, dict)
else vars(self.runtime_args or {})
)
runtime_set_args = {
'tracer_provider': None,
'grpc_tracing_server_interceptors': None,
'runtime_name': _runtime_args.get('name', 'test'),
'metrics_registry': None,
'meter': None,
'aio_tracing_client_interceptors': None,
'tracing_client_interceptor': None,
}
runtime_args_dict = {**runtime_set_args, **default_args_dict, **_runtime_args}
self.runtime_args = SimpleNamespace(**runtime_args_dict)
@property
def port(self):
"""Gets the first port of the port list argument. To be used in the regular case where a Gateway exposes a single port
:return: The first port to be exposed
"""
return self.runtime_args.port[0] if isinstance(self.runtime_args.port, list) else self.runtime_args.port
@property
def ports(self):
"""Gets all the list of ports from the runtime_args as a list.
:return: The lists of ports to be exposed
"""
return self.runtime_args.port if isinstance(self.runtime_args.port, list) else [self.runtime_args.port]
@property
def protocols(self):
"""Gets all the list of protocols from the runtime_args as a list.
:return: The lists of protocols to be exposed
"""
return self.runtime_args.protocol if isinstance(self.runtime_args.protocol, list) else [self.runtime_args.protocol]
@property
def host(self):
"""Gets the host from the runtime_args
:return: The host where to bind the gateway
"""
return self.runtime_args.host
@abc.abstractmethod
async def setup_server(self):
"""Setup server"""
...
@abc.abstractmethod
async def run_server(self):
"""Run server forever"""
...
@abc.abstractmethod
async def shutdown(self):
"""Shutdown the server and free other allocated resources, e.g, streamer object, health check service, ..."""
self._teardown_instrumentation()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def is_ready(
ctrl_address: str,
protocol: Optional[str] = 'grpc',
timeout: float = 1.0,
logger=None,
**kwargs,
) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control request needs to be sent
:param protocol: protocol of the gateway runtime
:param timeout: timeout of grpc call in seconds
:param logger: JinaLogger to be used
:param kwargs: extra keyword arguments
:return: True if status is ready else False.
"""
from jina.serve.runtimes.servers.grpc import GRPCServer
from jina.serve.runtimes.servers.http import FastAPIBaseServer
from jina.enums import ProtocolType
if (
protocol is None
or protocol == ProtocolType.GRPC
or protocol == 'grpc'
):
res = GRPCServer.is_ready(ctrl_address)
else:
res = FastAPIBaseServer.is_ready(ctrl_address)
return res
@staticmethod
async def async_is_ready(
ctrl_address: str,
protocol: Optional[str] = 'grpc',
timeout: float = 1.0,
logger=None,
**kwargs,
) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control request needs to be sent
:param protocol: protocol of the gateway runtime
:param timeout: timeout of grpc call in seconds
:param logger: JinaLogger to be used
:param kwargs: extra keyword arguments
:return: True if status is ready else False.
"""
from jina.serve.runtimes.servers.grpc import GRPCServer
from jina.serve.runtimes.servers.http import FastAPIBaseServer
from jina.enums import ProtocolType
if (
protocol is None
or protocol == ProtocolType.GRPC
or protocol == 'grpc'
):
res = await GRPCServer.async_is_ready(ctrl_address, logger=logger)
else:
res = await FastAPIBaseServer.async_is_ready(ctrl_address, logger=logger)
return res
@classmethod
def wait_for_ready_or_shutdown(
cls,
timeout: Optional[float],
ready_or_shutdown_event: Union['multiprocessing.Event', 'threading.Event'],
ctrl_address: str,
health_check: bool = False,
**kwargs,
):
"""
Check if the runtime has successfully started
:param timeout: The time to wait before readiness or failure is determined
:param ctrl_address: the address where the control message needs to be sent
:param ready_or_shutdown_event: the multiprocessing event to detect if the process failed or is ready
:param health_check: if true, a grpc health check will be used instead of relying on the event
:param kwargs: extra keyword arguments
        :return: True if the runtime is ready or needs to be shut down
"""
timeout_ns = 1000000000 * timeout if timeout else None
now = time.time_ns()
if health_check:
return cls.is_ready(ctrl_address, timeout)
while timeout_ns is None or time.time_ns() - now < timeout_ns:
if ready_or_shutdown_event.is_set() or cls.is_ready(ctrl_address, **kwargs):
return True
time.sleep(0.1)
return False
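# A rough, non-authoritative sketch of what a concrete server built on BaseServer has to
# provide: the three abstract coroutines above. The names below (DummyServer, the asyncio
# event) are illustrative only and not part of jina.
#
#   import asyncio
#
#   class DummyServer(BaseServer):
#       async def setup_server(self):
#           self._stop_event = asyncio.Event()
#
#       async def run_server(self):
#           await self._stop_event.wait()   # serve until shutdown is requested
#
#       async def shutdown(self):
#           await super().shutdown()        # tears down instrumentation
#           self._stop_event.set()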
|
8a7dcc3b64a165480e46808647df43276e718e9f
|
302ce5ab1045ee93845608c96580c63d54d730af
|
/src/spikeinterface/sortingcomponents/clustering/random_projections.py
|
fcbcac097f52a89b7c009bf7c4b4e565dd876cab
|
[
"MIT"
] |
permissive
|
SpikeInterface/spikeinterface
|
f900b62720860b2881d2e6b5fa4441e0e560f625
|
ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf
|
refs/heads/main
| 2023-09-02T11:27:54.687021
| 2023-09-01T13:48:29
| 2023-09-01T13:48:29
| 196,581,117
| 295
| 133
|
MIT
| 2023-09-14T19:12:16
| 2019-07-12T13:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 9,425
|
py
|
random_projections.py
|
# """Sorting components: clustering"""
from pathlib import Path
import shutil
import numpy as np
try:
import hdbscan
HAVE_HDBSCAN = True
except ImportError:
HAVE_HDBSCAN = False
import random, string, os
from spikeinterface.core import get_global_tmp_folder, get_noise_levels, get_channel_distances, get_random_data_chunks
from sklearn.preprocessing import QuantileTransformer, MaxAbsScaler
from spikeinterface.core.waveform_tools import extract_waveforms_to_buffers
from .clustering_tools import remove_duplicates, remove_duplicates_via_matching, remove_duplicates_via_dip
from spikeinterface.core import NumpySorting
from spikeinterface.core import extract_waveforms
from spikeinterface.sortingcomponents.features_from_peaks import compute_features_from_peaks, EnergyFeature
class RandomProjectionClustering:
"""
hdbscan clustering on peak_locations previously done by localize_peaks()
"""
_default_params = {
"hdbscan_kwargs": {
"min_cluster_size": 20,
"allow_single_cluster": True,
"core_dist_n_jobs": os.cpu_count(),
"cluster_selection_method": "leaf",
},
"cleaning_kwargs": {},
"radius_um": 100,
"max_spikes_per_unit": 200,
"selection_method": "closest_to_centroid",
"nb_projections": {"ptp": 8, "energy": 2},
"ms_before": 1.5,
"ms_after": 1.5,
"random_seed": 42,
"cleaning_method": "matching",
"shared_memory": False,
"min_values": {"ptp": 0, "energy": 0},
"tmp_folder": None,
"job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "10M", "verbose": True, "progress_bar": True},
}
@classmethod
def main_function(cls, recording, peaks, params):
assert HAVE_HDBSCAN, "random projections clustering need hdbscan to be installed"
if "n_jobs" in params["job_kwargs"]:
if params["job_kwargs"]["n_jobs"] == -1:
params["job_kwargs"]["n_jobs"] = os.cpu_count()
if "core_dist_n_jobs" in params["hdbscan_kwargs"]:
if params["hdbscan_kwargs"]["core_dist_n_jobs"] == -1:
params["hdbscan_kwargs"]["core_dist_n_jobs"] = os.cpu_count()
d = params
verbose = d["job_kwargs"]["verbose"]
peak_dtype = [("sample_index", "int64"), ("unit_index", "int64"), ("segment_index", "int64")]
fs = recording.get_sampling_frequency()
nbefore = int(params["ms_before"] * fs / 1000.0)
nafter = int(params["ms_after"] * fs / 1000.0)
num_samples = nbefore + nafter
num_chans = recording.get_num_channels()
noise_levels = get_noise_levels(recording, return_scaled=False)
np.random.seed(d["random_seed"])
features_params = {}
features_list = []
noise_snippets = None
for proj_type in ["ptp", "energy"]:
if d["nb_projections"][proj_type] > 0:
features_list += [f"random_projections_{proj_type}"]
if d["min_values"][proj_type] == "auto":
if noise_snippets is None:
num_segments = recording.get_num_segments()
num_chunks = 3 * d["max_spikes_per_unit"] // num_segments
noise_snippets = get_random_data_chunks(
recording, num_chunks_per_segment=num_chunks, chunk_size=num_samples, seed=42
)
noise_snippets = noise_snippets.reshape(num_chunks, num_samples, num_chans)
if proj_type == "energy":
data = np.linalg.norm(noise_snippets, axis=1)
min_values = np.median(data, axis=0)
elif proj_type == "ptp":
data = np.ptp(noise_snippets, axis=1)
min_values = np.median(data, axis=0)
elif d["min_values"][proj_type] > 0:
min_values = d["min_values"][proj_type]
else:
min_values = None
projections = np.random.randn(num_chans, d["nb_projections"][proj_type])
features_params[f"random_projections_{proj_type}"] = {
"radius_um": params["radius_um"],
"projections": projections,
"min_values": min_values,
}
features_data = compute_features_from_peaks(
recording, peaks, features_list, features_params, ms_before=1, ms_after=1, **params["job_kwargs"]
)
if len(features_data) > 1:
hdbscan_data = np.hstack((features_data[0], features_data[1]))
else:
hdbscan_data = features_data[0]
import sklearn
clustering = hdbscan.hdbscan(hdbscan_data, **d["hdbscan_kwargs"])
peak_labels = clustering[0]
labels = np.unique(peak_labels)
labels = labels[labels >= 0]
best_spikes = {}
nb_spikes = 0
all_indices = np.arange(0, peak_labels.size)
max_spikes = params["max_spikes_per_unit"]
selection_method = params["selection_method"]
for unit_ind in labels:
mask = peak_labels == unit_ind
if selection_method == "closest_to_centroid":
data = hdbscan_data[mask]
centroid = np.median(data, axis=0)
distances = sklearn.metrics.pairwise_distances(centroid[np.newaxis, :], data)[0]
best_spikes[unit_ind] = all_indices[mask][np.argsort(distances)[:max_spikes]]
elif selection_method == "random":
best_spikes[unit_ind] = np.random.permutation(all_indices[mask])[:max_spikes]
nb_spikes += best_spikes[unit_ind].size
spikes = np.zeros(nb_spikes, dtype=peak_dtype)
mask = np.zeros(0, dtype=np.int32)
for unit_ind in labels:
mask = np.concatenate((mask, best_spikes[unit_ind]))
idx = np.argsort(mask)
mask = mask[idx]
spikes["sample_index"] = peaks[mask]["sample_index"]
spikes["segment_index"] = peaks[mask]["segment_index"]
spikes["unit_index"] = peak_labels[mask]
cleaning_method = params["cleaning_method"]
if verbose:
print("We found %d raw clusters, starting to clean with %s..." % (len(labels), cleaning_method))
if cleaning_method == "cosine":
wfs_arrays = extract_waveforms_to_buffers(
recording,
spikes,
labels,
nbefore,
nafter,
mode="shared_memory",
return_scaled=False,
folder=None,
dtype=recording.get_dtype(),
sparsity_mask=None,
copy=True,
**params["job_kwargs"],
)
labels, peak_labels = remove_duplicates(
wfs_arrays, noise_levels, peak_labels, num_samples, num_chans, **params["cleaning_kwargs"]
)
elif cleaning_method == "dip":
wfs_arrays = {}
for label in labels:
mask = label == peak_labels
wfs_arrays[label] = hdbscan_data[mask]
labels, peak_labels = remove_duplicates_via_dip(wfs_arrays, peak_labels, **params["cleaning_kwargs"])
elif cleaning_method == "matching":
# create a tmp folder
if params["tmp_folder"] is None:
name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8))
tmp_folder = get_global_tmp_folder() / name
else:
tmp_folder = Path(params["tmp_folder"])
if params["shared_memory"]:
waveform_folder = None
mode = "memory"
else:
waveform_folder = tmp_folder / "waveforms"
mode = "folder"
sorting_folder = tmp_folder / "sorting"
sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs)
sorting = sorting.save(folder=sorting_folder)
we = extract_waveforms(
recording,
sorting,
waveform_folder,
ms_before=params["ms_before"],
ms_after=params["ms_after"],
**params["job_kwargs"],
return_scaled=False,
mode=mode,
)
cleaning_matching_params = params["job_kwargs"].copy()
cleaning_matching_params["chunk_duration"] = "100ms"
cleaning_matching_params["n_jobs"] = 1
cleaning_matching_params["verbose"] = False
cleaning_matching_params["progress_bar"] = False
cleaning_params = params["cleaning_kwargs"].copy()
cleaning_params["tmp_folder"] = tmp_folder
labels, peak_labels = remove_duplicates_via_matching(
we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params
)
if params["tmp_folder"] is None:
shutil.rmtree(tmp_folder)
else:
shutil.rmtree(tmp_folder / "waveforms")
shutil.rmtree(tmp_folder / "sorting")
if verbose:
print("We kept %d non-duplicated clusters..." % len(labels))
return labels, peak_labels
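# A minimal usage sketch (illustrative, not part of this module): the entry point is the
# classmethod main_function(recording, peaks, params), where `peaks` is the structured array
# produced by peak detection and `params` follows the layout of _default_params.
#
#   import copy
#   params = copy.deepcopy(RandomProjectionClustering._default_params)
#   params["hdbscan_kwargs"]["min_cluster_size"] = 30   # illustrative override
#   labels, peak_labels = RandomProjectionClustering.main_function(recording, peaks, params)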
|
3a92296bbb7899449b0193069ebd79108e445d62
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd/src/dbnd/_vendor/pendulum/lang/ko.py
|
60d1bc38d71153ea9248bb727582620e82832b20
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
ko.py
|
# -*- coding: utf-8 -*-
translations = {
# Days
'days': {
0: '일요일',
1: '월요일',
2: '화요일',
3: '수요일',
4: '목요일',
5: '금요일',
6: '토요일'
},
'days_abbrev': {
0: '일',
1: '월',
2: '화',
3: '수',
4: '목',
5: '금',
6: '토'
},
# Months
'months': {
1: '1월',
2: '2월',
3: '3월',
4: '4월',
5: '5월',
6: '6월',
7: '7월',
8: '8월',
9: '9월',
10: '10월',
11: '11월',
12: '12월',
},
'months_abbrev': {
1: ' 1',
2: ' 2',
3: ' 3',
4: ' 4',
5: ' 5',
6: ' 6',
7: ' 7',
8: ' 8',
9: ' 9',
10: '10',
11: '11',
12: '12',
},
# Units of time
'year': '{count} 년',
'month': '{count} 개월',
'week': '{count} 주일',
'day': '{count} 일',
'hour': '{count} 시간',
'minute': '{count} 분',
'second': '{count} 초',
# Relative time
'ago': '{time} 전',
'from_now': '{time} 후',
'after': '{time} 뒤',
'before': '{time} 앞',
# Meridians
'meridian': lambda time: '오전' if 0 <= time[0] < 12 else '오후',
# Date formats
'date_formats': {
'LTS': 'A h시 m분 s초',
'LT': 'A h시 m분',
'LLLL': 'YYYY년 MMMM D일 dddd A h시 m분',
'LLL': 'YYYY년 MMMM D일 A h시 m분',
'LL': 'YYYY년 MMMM D일',
'L': 'YYYY.MM.DD',
},
}
|
cc103c98d0e6f321e01c2dbe2e963cefa470807a
|
4e1dd6791505a154d92d6352e619c254e12ed574
|
/wagtail_localize/tests/test_tasks.py
|
b96eff6284a31a3a843d7578634c096eb3190c4d
|
[
"BSD-3-Clause"
] |
permissive
|
wagtail/wagtail-localize
|
fba4b9db6c1b043a96d59178dede31dc117f9674
|
3f060bd061249815a8fbc79dc1c6e3e954ee3ecb
|
refs/heads/main
| 2023-08-04T14:32:06.683373
| 2023-07-27T15:02:52
| 2023-07-27T15:02:52
| 179,706,908
| 183
| 72
|
NOASSERTION
| 2023-09-07T08:33:48
| 2019-04-05T15:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
test_tasks.py
|
from unittest import mock
from django.test import TestCase, override_settings
from wagtail_localize.tasks import get_backend
@override_settings(
WAGTAILLOCALIZE_JOBS={
"BACKEND": "wagtail_localize.tasks.DjangoRQJobBackend",
"OPTIONS": {"queue": "test_queue"},
}
)
@mock.patch("django_rq.get_queue")
class TestDjangoRQBackend(TestCase):
def test_enqueue_with_django_rq(self, get_queue):
backend = get_backend()
backend.enqueue(print, ["Hello world!"], {"end": "\r\n"})
get_queue.assert_called_with("default")
get_queue().enqueue.assert_called_with(print, "Hello world!", end="\r\n")
|
268942093cce70a3a832241466b76cdcecd7172b
|
360ef22d5900573a4f0ed69b5f36109acea05e2b
|
/vumi/tests/test_utils.py
|
7478a3e0b02b246e09c02848075e271e8689d9d8
|
[
"BSD-2-Clause"
] |
permissive
|
praekeltfoundation/vumi
|
8e756580b9027f2d7e7ffd06dcf07694188e4a39
|
b74b5dac95df778519f54c670a353e4bda496df9
|
refs/heads/develop
| 2023-08-28T15:04:56.021337
| 2020-11-16T07:55:34
| 2020-11-16T07:55:34
| 1,081,665
| 117
| 18
|
BSD-3-Clause
| 2023-06-19T17:44:45
| 2010-11-15T11:34:03
|
Python
|
UTF-8
|
Python
| false
| false
| 19,072
|
py
|
test_utils.py
|
import os.path
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twisted.internet.error import ConnectionDone
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.task import Clock
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.web import http
from twisted.web.client import WebClientContextFactory, ResponseFailed
from vumi.utils import (
normalize_msisdn, vumi_resource_path, cleanup_msisdn, get_operator_name,
http_request, http_request_full, get_first_word, redis_from_config,
build_web_site, LogFilterSite, PkgResources, HttpTimeoutError,
StatusEdgeDetector)
from vumi.message import TransportStatus
from vumi.persist.fake_redis import FakeRedis
from vumi.tests.fake_connection import (
FakeServer, FakeHttpServer, ProxyAgentWithContext, wait0)
from vumi.tests.helpers import VumiTestCase, import_skip
class DummyRequest(object):
def __init__(self, postpath, prepath):
self.postpath = postpath
self.prepath = prepath
class TestNormalizeMsisdn(VumiTestCase):
def test_leading_zero(self):
self.assertEqual(normalize_msisdn('0761234567', '27'),
'+27761234567')
def test_double_leading_zero(self):
self.assertEqual(normalize_msisdn('0027761234567', '27'),
'+27761234567')
def test_leading_plus(self):
self.assertEqual(normalize_msisdn('+27761234567', '27'),
'+27761234567')
def test_no_leading_plus_or_zero(self):
self.assertEqual(normalize_msisdn('27761234567', '27'),
'+27761234567')
def test_short_address(self):
self.assertEqual(normalize_msisdn('1234'), '1234')
self.assertEqual(normalize_msisdn('12345'), '12345')
def test_short_address_with_leading_plus(self):
self.assertEqual(normalize_msisdn('+12345'), '+12345')
def test_unicode_addr_remains_unicode(self):
addr = normalize_msisdn(u'0761234567', '27')
self.assertEqual(addr, u'+27761234567')
self.assertTrue(isinstance(addr, unicode))
def test_str_addr_remains_str(self):
addr = normalize_msisdn('0761234567', '27')
self.assertEqual(addr, '+27761234567')
self.assertTrue(isinstance(addr, str))
class TestUtils(VumiTestCase):
def test_make_campaign_path_abs(self):
vumi_tests_path = os.path.dirname(__file__)
vumi_path = os.path.dirname(os.path.dirname(vumi_tests_path))
self.assertEqual('/foo/bar', vumi_resource_path('/foo/bar'))
self.assertEqual(os.path.join(vumi_path, 'vumi/resources/foo/bar'),
vumi_resource_path('foo/bar'))
def test_cleanup_msisdn(self):
self.assertEqual('27761234567', cleanup_msisdn('27761234567', '27'))
self.assertEqual('27761234567', cleanup_msisdn('+27761234567', '27'))
self.assertEqual('27761234567', cleanup_msisdn('0761234567', '27'))
def test_get_operator_name(self):
mapping = {'27': {'2782': 'VODACOM', '2783': 'MTN'}}
self.assertEqual('MTN', get_operator_name('27831234567', mapping))
self.assertEqual('VODACOM', get_operator_name('27821234567', mapping))
self.assertEqual('UNKNOWN', get_operator_name('27801234567', mapping))
def test_get_first_word(self):
self.assertEqual('KEYWORD',
get_first_word('KEYWORD rest of the message'))
self.assertEqual('', get_first_word(''))
self.assertEqual('', get_first_word(None))
def test_redis_from_config_str(self):
try:
fake_redis = redis_from_config("FAKE_REDIS")
except ImportError, e:
import_skip(e, 'redis')
self.assertTrue(isinstance(fake_redis, FakeRedis))
def test_redis_from_config_fake_redis(self):
fake_redis = FakeRedis()
try:
self.assertEqual(redis_from_config(fake_redis), fake_redis)
except ImportError, e:
import_skip(e, 'redis')
def get_resource(self, path, site):
request = DummyRequest(postpath=path.split('/'), prepath=[])
return site.getResourceFor(request)
def test_build_web_site(self):
resource_a = Resource()
resource_b = Resource()
site = build_web_site({
'foo/a': resource_a,
'bar/b': resource_b,
})
self.assertEqual(self.get_resource('foo/a', site), resource_a)
self.assertEqual(self.get_resource('bar/b', site), resource_b)
self.assertTrue(isinstance(site, LogFilterSite))
def test_build_web_site_with_overlapping_paths(self):
resource_a = Resource()
resource_b = Resource()
site = build_web_site({
'foo/a': resource_a,
'foo/b': resource_b,
})
self.assertEqual(self.get_resource('foo/a', site), resource_a)
self.assertEqual(self.get_resource('foo/b', site), resource_b)
self.assertTrue(isinstance(site, LogFilterSite))
def test_build_web_site_with_custom_site_class(self):
site = build_web_site({}, site_class=Site)
self.assertTrue(isinstance(site, Site))
self.assertFalse(isinstance(site, LogFilterSite))
class FakeHTTP10(Protocol):
def dataReceived(self, data):
self.transport.write(self.factory.response_body)
self.transport.loseConnection()
class TestHttpUtils(VumiTestCase):
def setUp(self):
self.fake_http = FakeHttpServer(lambda r: self._render_request(r))
self.url = "http://example.com:9980/"
def set_render(self, f):
def render(request):
request.setHeader('Content-Type', 'text/plain')
try:
data = f(request)
request.setResponseCode(http.OK)
except Exception, err:
data = str(err)
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
return data
self._render_request = render
def set_async_render(self):
def render_interrupt(request):
reactor.callLater(0, d.callback, request)
return NOT_DONE_YET
d = Deferred()
self.set_render(render_interrupt)
return d
@inlineCallbacks
def make_real_webserver(self):
"""
Construct a real webserver to test actual connectivity.
"""
root = Resource()
root.isLeaf = True
root.render = lambda r: self._render_request(r)
site_factory = Site(root)
webserver = yield reactor.listenTCP(
0, site_factory, interface='127.0.0.1')
self.add_cleanup(webserver.loseConnection)
addr = webserver.getHost()
url = "http://%s:%s/" % (addr.host, addr.port)
returnValue(url)
def with_agent(self, f, *args, **kw):
"""
Wrapper around http_request_full and friends that injects our fake
connection's agent.
"""
kw.setdefault('agent_class', self.fake_http.get_agent)
return f(*args, **kw)
@inlineCallbacks
def test_http_request_to_localhost(self):
"""
Make a request over the network (localhost) to check that we're getting
a real agent by default.
"""
url = yield self.make_real_webserver()
self.set_render(lambda r: "Yay")
data = yield http_request(url, '')
self.assertEqual(data, "Yay")
@inlineCallbacks
def test_http_request_ok(self):
self.set_render(lambda r: "Yay")
data = yield self.with_agent(http_request, self.url, '')
self.assertEqual(data, "Yay")
@inlineCallbacks
def test_http_request_err(self):
def err(r):
raise ValueError("Bad")
self.set_render(err)
data = yield self.with_agent(http_request, self.url, '')
self.assertEqual(data, "Bad")
@inlineCallbacks
def test_http_request_full_to_localhost(self):
"""
Make a request over the network (localhost) to check that we're getting
a real agent by default.
"""
url = yield self.make_real_webserver()
self.set_render(lambda r: "Yay")
request = yield http_request_full(url, '')
self.assertEqual(request.delivered_body, "Yay")
self.assertEqual(request.code, http.OK)
self.set_render(lambda r: "Yay")
@inlineCallbacks
def test_http_request_with_custom_context_factory(self):
self.set_render(lambda r: "Yay")
agents = []
ctxt = WebClientContextFactory()
def stashing_factory(reactor, contextFactory=None, pool=None):
agent = self.fake_http.get_agent(
reactor, contextFactory=contextFactory, pool=pool)
agents.append(agent)
return agent
request = yield http_request_full(
self.url, '', context_factory=ctxt, agent_class=stashing_factory)
self.assertEqual(request.delivered_body, "Yay")
self.assertEqual(request.code, http.OK)
[agent] = agents
self.assertEqual(agent.contextFactory, ctxt)
@inlineCallbacks
def test_http_request_full_drop(self):
"""
If a connection drops, we get an appropriate exception.
"""
got_request = self.set_async_render()
got_data = self.with_agent(http_request_full, self.url, '')
request = yield got_request
request.setResponseCode(http.OK)
request.write("Foo!")
request.transport.loseConnection()
yield self.assertFailure(got_data, ResponseFailed)
@inlineCallbacks
def test_http_request_full_ok(self):
self.set_render(lambda r: "Yay")
request = yield self.with_agent(http_request_full, self.url, '')
self.assertEqual(request.delivered_body, "Yay")
self.assertEqual(request.code, http.OK)
@inlineCallbacks
def test_http_request_full_headers(self):
def check_ua(request):
self.assertEqual('blah', request.getHeader('user-agent'))
return "Yay"
self.set_render(check_ua)
request = yield self.with_agent(
http_request_full, self.url, '', {'User-Agent': ['blah']})
self.assertEqual(request.delivered_body, "Yay")
self.assertEqual(request.code, http.OK)
request = yield self.with_agent(
http_request_full, self.url, '', {'User-Agent': 'blah'})
self.assertEqual(request.delivered_body, "Yay")
self.assertEqual(request.code, http.OK)
@inlineCallbacks
def test_http_request_full_err(self):
def err(r):
raise ValueError("Bad")
self.set_render(err)
request = yield self.with_agent(http_request_full, self.url, '')
self.assertEqual(request.delivered_body, "Bad")
self.assertEqual(request.code, http.INTERNAL_SERVER_ERROR)
@inlineCallbacks
def test_http_request_potential_data_loss(self):
"""
In the absence of a Content-Length header or chunked transfer encoding,
we need to swallow a PotentialDataLoss exception.
"""
# We can't use Twisted's HTTP server, because that always does the
# sensible thing. We also pretend to be HTTP 1.0 for simplicity.
factory = Factory()
factory.protocol = FakeHTTP10
factory.response_body = (
"HTTP/1.0 201 CREATED\r\n"
"Date: Mon, 23 Jan 2012 15:08:47 GMT\r\n"
"Server: Fake HTTP 1.0\r\n"
"Content-Type: text/html; charset=utf-8\r\n"
"\r\n"
"Yay")
fake_server = FakeServer(factory)
agent_factory = lambda *a, **kw: ProxyAgentWithContext(
fake_server.endpoint, *a, **kw)
data = yield http_request(self.url, '', agent_class=agent_factory)
self.assertEqual(data, "Yay")
@inlineCallbacks
def test_http_request_full_data_limit(self):
self.set_render(lambda r: "Four")
d = self.with_agent(http_request_full, self.url, '', data_limit=3)
def check_response(reason):
self.assertTrue(reason.check('vumi.utils.HttpDataLimitError'))
self.assertEqual(reason.getErrorMessage(),
"More than 3 bytes received")
d.addBoth(check_response)
yield d
@inlineCallbacks
def test_http_request_full_ok_with_timeout_set(self):
"""
If a request completes within the timeout, everything is happy.
"""
clock = Clock()
self.set_render(lambda r: "Yay")
response = yield self.with_agent(
http_request_full, self.url, '', timeout=30, reactor=clock)
self.assertEqual(response.delivered_body, "Yay")
self.assertEqual(response.code, http.OK)
# Advance the clock past the timeout limit.
clock.advance(30)
@inlineCallbacks
def test_http_request_full_drop_with_timeout_set(self):
"""
If a request fails within the timeout, everything is happy(ish).
"""
clock = Clock()
d = self.set_async_render()
got_data = self.with_agent(
http_request_full, self.url, '', timeout=30, reactor=clock)
request = yield d
request.setResponseCode(http.OK)
request.write("Foo!")
request.transport.loseConnection()
yield self.assertFailure(got_data, ResponseFailed)
# Advance the clock past the timeout limit.
clock.advance(30)
def test_http_request_full_timeout_before_connect(self):
"""
A request can time out before a connection is made.
"""
clock = Clock()
# Instead of setting a render function, we tell the server not to
# accept connections.
self.fake_http.fake_server.auto_accept = False
d = self.with_agent(
http_request_full, self.url, '', timeout=30, reactor=clock)
self.assertNoResult(d)
clock.advance(29)
self.assertNoResult(d)
clock.advance(1)
self.failureResultOf(d, HttpTimeoutError)
@inlineCallbacks
def test_http_request_full_timeout_after_connect(self):
"""
The client disconnects after the timeout if no data has been received
from the server.
"""
clock = Clock()
request_started = self.set_async_render()
client_done = self.with_agent(
http_request_full, self.url, '', timeout=30, reactor=clock)
yield request_started
self.assertNoResult(client_done)
clock.advance(29)
self.assertNoResult(client_done)
clock.advance(1)
failure = self.failureResultOf(client_done, HttpTimeoutError)
self.assertEqual(
failure.getErrorMessage(), "Timeout while connecting")
@inlineCallbacks
def test_http_request_full_timeout_after_first_receive(self):
"""
The client disconnects after the timeout even if some data has already
been received.
"""
clock = Clock()
request_started = self.set_async_render()
client_done = self.with_agent(
http_request_full, self.url, '', timeout=30, reactor=clock)
request = yield request_started
request_done = request.notifyFinish()
request.write("some data")
clock.advance(1)
yield wait0()
self.assertNoResult(client_done)
self.assertNoResult(request_done)
clock.advance(28)
self.assertNoResult(client_done)
self.assertNoResult(request_done)
clock.advance(1)
failure = self.failureResultOf(client_done, HttpTimeoutError)
self.assertEqual(
failure.getErrorMessage(), "Timeout while receiving data")
yield self.assertFailure(request_done, ConnectionDone)
class TestPkgResources(VumiTestCase):
vumi_tests_path = os.path.dirname(__file__)
def test_absolute_path(self):
pkg = PkgResources("vumi.tests")
self.assertEqual('/foo/bar', pkg.path('/foo/bar'))
def test_relative_path(self):
pkg = PkgResources("vumi.tests")
self.assertEqual(os.path.join(self.vumi_tests_path, 'foo/bar'),
pkg.path('foo/bar'))
class TestStatusEdgeDetector(VumiTestCase):
def test_status_not_change(self):
'''If the status doesn't change, None should be returned.'''
sed = StatusEdgeDetector()
status1 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'test'}
self.assertEqual(sed.check_status(**status1), status1)
status2 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'another test'}
self.assertEqual(sed.check_status(**status2), None)
def test_status_change(self):
'''If the status does change, the status should be returned.'''
sed = StatusEdgeDetector()
status1 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'test'}
self.assertEqual(sed.check_status(**status1), status1)
status2 = {
'component': 'foo',
'status': 'degraded',
'type': 'bar',
'message': 'another test'}
self.assertEqual(sed.check_status(**status2), status2)
def test_components_separate(self):
'''A state change in one component should not affect other
components.'''
sed = StatusEdgeDetector()
comp1_status1 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'test'}
self.assertEqual(sed.check_status(**comp1_status1), comp1_status1)
comp2_status1 = {
'component': 'bar',
'status': 'ok',
'type': 'bar',
'message': 'another test'}
self.assertEqual(sed.check_status(**comp2_status1), comp2_status1)
comp2_status2 = {
'component': 'bar',
'status': 'degraded',
'type': 'bar',
'message': 'another test'}
self.assertEqual(sed.check_status(**comp2_status2), comp2_status2)
comp1_status2 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'test'}
self.assertEqual(sed.check_status(**comp1_status2), None)
def test_type_change(self):
'''A change in status type should result in the status being
returned.'''
sed = StatusEdgeDetector()
status1 = {
'component': 'foo',
'status': 'ok',
'type': 'bar',
'message': 'test'}
self.assertEqual(sed.check_status(**status1), status1)
status2 = {
'component': 'foo',
'status': 'ok',
'type': 'baz',
'message': 'test'}
self.assertEqual(sed.check_status(**status2), status2)
|
976676019b1ebf5e645d7884c3960c559d5d7ec5
|
e5e0d729f082999a9bec142611365b00f7bfc684
|
/tensorflow/contrib/checkpoint/python/containers.py
|
a25d51980ea760dfb7f323497a397fbd94fd5f23
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/tensorflow
|
ed6294098c7354dfc9f09631fc5ae22dbc278138
|
7cbba04a2ee16d21309eefad5be6585183a2d5a9
|
refs/heads/r1.15.5+nv23.03
| 2023-08-16T22:25:18.037979
| 2023-08-03T22:09:23
| 2023-08-03T22:09:23
| 263,748,045
| 763
| 117
|
Apache-2.0
| 2023-07-03T15:45:19
| 2020-05-13T21:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 2,943
|
py
|
containers.py
|
"""Trackable data structures."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.tracking import base as trackable_lib
from tensorflow.python.training.tracking import data_structures
class UniqueNameTracker(data_structures.TrackableDataStructure):
"""Adds dependencies on trackable objects with name hints.
Useful for creating dependencies with locally unique names.
Example usage:
```python
class SlotManager(tf.contrib.checkpoint.Checkpointable):
def __init__(self):
# Create a dependency named "slotdeps" on the container.
self.slotdeps = tf.contrib.checkpoint.UniqueNameTracker()
slotdeps = self.slotdeps
slots = []
slots.append(slotdeps.track(tf.Variable(3.), "x")) # Named "x"
slots.append(slotdeps.track(tf.Variable(4.), "y"))
slots.append(slotdeps.track(tf.Variable(5.), "x")) # Named "x_1"
```
"""
def __init__(self):
super(UniqueNameTracker, self).__init__()
self._maybe_initialize_trackable()
self._name_counts = {}
@property
def _values(self):
return [dep.ref for dep in self._checkpoint_dependencies]
def track(self, trackable, base_name):
"""Add a dependency on `trackable`.
Args:
trackable: An object to add a checkpoint dependency on.
base_name: A name hint, which is uniquified to determine the dependency
name.
Returns:
`trackable`, for chaining.
Raises:
ValueError: If `trackable` is not a trackable object.
"""
if not isinstance(trackable, trackable_lib.Trackable):
raise ValueError(
("Expected a trackable value, got %s which does not inherit "
"from tf.track.Trackable.") % (trackable,))
def _format_name(prefix, number):
if number > 0:
return "%s_%d" % (prefix, number)
else:
return prefix
count = self._name_counts.get(base_name, 0)
candidate = _format_name(base_name, count)
while self._lookup_dependency(candidate) is not None:
count += 1
candidate = _format_name(base_name, count)
self._name_counts[base_name] = count + 1
self._track_value(trackable, name=candidate)
return trackable
|
413bd7ab86efe9fdc979bb22c4e5bdacad0714b4
|
fa6e1299ef52ca2d4a13b3788d2a4d0540728f81
|
/tests/unit/transform/test_post.py
|
e21802afacbbdd0de900b3169e0168468fc964d9
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAILabel
|
c3abd164255a50279fc5aa6a87f4336fff4d6833
|
c90f42c0730554e3a05af93645ae84ccdcb5e14b
|
refs/heads/main
| 2023-09-01T21:44:42.465238
| 2023-08-31T17:17:08
| 2023-08-31T17:17:08
| 351,826,770
| 448
| 167
|
Apache-2.0
| 2023-09-14T12:06:28
| 2021-03-26T15:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,996
|
py
|
test_post.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
from parameterized import parameterized
from monailabel.transform.post import (
BoundingBoxd,
DumpImagePrediction2Dd,
ExtremePointsd,
FindContoursd,
LargestCCd,
MergeAllPreds,
RenameKeyd,
Restored,
)
CCD_DATA = [
{"keys": ("pred",)},
{"pred": np.array([[[1, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])},
np.array([[[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]),
]
EXTREME_POINTS_DATA = [
{"keys": "pred"},
{"pred": np.array([[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]])},
[[0, 1, 1], [0, 1, 1], [0, 1, 1], [0, 1, 1], [0, 1, 1], [0, 1, 1]],
]
BB_DATA = [
{"keys": "pred"},
{"pred": np.array([[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]])},
[[1, 1], [3, 3]],
]
RESTORED_DATA = [
{"keys": "pred", "ref_image": "ref"},
{
"pred": np.array([[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]]),
"ref_meta_dict": {
"spatial_shape": [1, 6, 6],
},
},
(1, 6, 6),
]
FINDCONTOURSD_DATA = [
{"keys": "pred", "labels": "Other", "min_positive": 4, "min_poly_area": 1},
{
"pred": np.array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]),
},
[[[1, 2], [2, 1], [3, 2], [2, 3]], [[1, 1], [1, 3], [3, 3], [3, 1]]],
]
DUMPIMAGEPREDICTION2DD_DATA = [
{
"image": np.random.rand(1, 3, 5, 5),
"pred": np.random.rand(1, 5, 5),
},
]
MERGEALLPREDS_DATA = [
{"keys": ["pred", "pred_2"]},
{
"pred": np.array([[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]]),
"pred_2": np.array([[[1, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]),
},
[[[1, 0, 0, 1], [1, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]]],
]
RENAMEKEY_DATA = [
{"source_key": "pred", "target_key": "pred_2"},
{"pred": np.array([[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]])},
]
class TestLargestCCd(unittest.TestCase):
@parameterized.expand([CCD_DATA])
def test_result(self, args, input_data, expected_output):
res = LargestCCd(**args)(input_data)
np.testing.assert_equal(res["pred"], expected_output)
class TestExtremePointsd(unittest.TestCase):
@parameterized.expand([EXTREME_POINTS_DATA])
def test_result(self, args, input_data, expected_data):
res = ExtremePointsd(**args)(input_data)
self.assertEqual(res["result"]["points"], expected_data)
class TestBoundingBoxd(unittest.TestCase):
@parameterized.expand([BB_DATA])
def test_result(self, args, input_data, expected_data):
res = BoundingBoxd(**args)(input_data)
self.assertEqual(res["result"]["bbox"], expected_data)
class TestRestored(unittest.TestCase):
@parameterized.expand([RESTORED_DATA])
def test_result(self, args, input_data, expected_shape):
res = Restored(**args)(input_data)
self.assertEqual(res["pred"].shape, expected_shape)
class TestFindContoursd(unittest.TestCase):
@parameterized.expand([FINDCONTOURSD_DATA])
def test_result(self, args, input_data, expected_output):
res = FindContoursd(**args)(input_data)
self.assertEqual(res["result"]["annotation"]["elements"][0]["contours"], expected_output)
class TestDumpImagePrediction2Dd(unittest.TestCase):
@parameterized.expand([DUMPIMAGEPREDICTION2DD_DATA])
def test_saved_content(self, input_data):
with tempfile.TemporaryDirectory() as tempdir:
image_path = os.path.join(tempdir, "testimage.png")
pred_path = os.path.join(tempdir, "testpred.png")
_ = DumpImagePrediction2Dd(image_path=image_path, pred_path=pred_path, pred_only=False)(input_data)
self.assertTrue(os.path.exists(image_path))
self.assertTrue(os.path.exists(pred_path))
class TestMergeAllPreds(unittest.TestCase):
    @parameterized.expand([MERGEALLPREDS_DATA])
def test_merge_pred(self, args, input_data, expected_output):
res = MergeAllPreds(**args)(input_data)
self.assertEqual(res.tolist(), expected_output)
class TestRenameKeyd(unittest.TestCase):
@parameterized.expand([RENAMEKEY_DATA])
def test_rename_key(self, args, input_data):
res = RenameKeyd(**args)(input_data)
self.assertEqual(list(res.keys())[0], args["target_key"])
if __name__ == "__main__":
unittest.main()
|
5d016b723c6a2759ba956d4216796798639f3747
|
b5a6d68eabc99fcd8ff9a456cc52fb8ec7f588e6
|
/chapter_10/p02_group_anagrams.py
|
968e525e2bc9b3c1c3ce15baf5cb12a0d3be4f85
|
[] |
no_license
|
careercup/CtCI-6th-Edition-Python
|
89ce160feddc3065e53dee9ea0f3565fec4466c4
|
6be09720f400e892d416747adde383545bc650d2
|
refs/heads/master
| 2023-08-31T05:51:25.107342
| 2022-11-06T09:23:08
| 2022-12-18T05:12:18
| 51,567,632
| 5,034
| 2,089
| null | 2023-09-12T05:11:10
| 2016-02-12T04:47:40
|
Python
|
UTF-8
|
Python
| false
| false
| 859
|
py
|
p02_group_anagrams.py
|
from collections import defaultdict
def group_anagrams(words):
anagrams = defaultdict(list)
for word in words:
sorted_word = "".join(sorted(word.lower()))
anagrams[sorted_word].append(word)
sorted_words = []
for similar_words in anagrams.values():
sorted_words.extend(similar_words)
return sorted_words
def test_group_anagrams():
words = ["abed", "later", "bead", "alert", "altered", "bade", "alter", "alerted"]
expected_sort = [
"abed",
"bead",
"bade",
"later",
"alert",
"alter",
"altered",
"alerted",
]
assert group_anagrams(words) == expected_sort
def example():
words = ["abed", "later", "bead", "alert", "altered", "bade", "alter", "alerted"]
print(group_anagrams(words))
if __name__ == "__main__":
example()
|
43036e61e7214b561c485f11e07a5cb95336e7b6
|
a4dd3fc9021a2b6e218da7ffd35fd4dba48b8f86
|
/Python/Sorting Algorithms/topological-sort.py
|
e01a7486ffb02be6c5d0f0a0ab4c074985baa8e5
|
[
"MIT"
] |
permissive
|
starkblaze01/Algorithms-Cheatsheet-Resources
|
19d23705e32c5e5c5408bcd3a3d1d9df289e0cd9
|
ac59f34f3f03b189d858ed1afe6a2ab83402d408
|
refs/heads/master
| 2023-05-26T18:15:31.006039
| 2023-01-31T10:00:01
| 2023-01-31T10:00:01
| 158,492,042
| 289
| 146
|
MIT
| 2023-05-23T02:22:20
| 2018-11-21T04:45:37
|
Python
|
UTF-8
|
Python
| false
| false
| 510
|
py
|
topological-sort.py
|
def topological_sort(graph, start):
seen = set()
    stack = []  # stack and order replace the path variable used in a plain DFS
    order = []  # order is built in reverse and flipped at the end
s = [start]
while s:
x = s.pop()
if x not in seen:
            seen.add(x)  # no need to append to a path variable
s.extend(graph[x])
while stack and x not in graph[stack[-1]]:
order.append(stack.pop())
stack.append(x)
return stack + order[::-1]
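# Illustrative usage (not part of the original file): a small DAG given as an
# adjacency-list dict. For this input the function returns ['a', 'b', 'c', 'd'],
# which is a valid topological ordering for the edges a->b, a->c, b->d, c->d.
if __name__ == '__main__':
    example_graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
    print(topological_sort(example_graph, 'a'))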
|
bf43e991eea554cbb7477dd87bc6a02ae4df918c
|
80d505489f5354d4b29156d6eea7e3516162bcc7
|
/exercises/practice/markdown/markdown.py
|
3c4bd2fa86a7d9863bf41868b21b133787a6d2b6
|
[
"Python-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
exercism/python
|
419e89690070eef42fc4c932faa0df0706d5c222
|
1e71b8a00c8b34c251d785f0a10843efc5234994
|
refs/heads/main
| 2023-08-29T03:18:02.845245
| 2023-08-25T12:50:16
| 2023-08-25T12:50:16
| 17,274,389
| 1,588
| 1,513
|
MIT
| 2023-09-14T20:33:13
| 2014-02-28T03:48:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,720
|
py
|
markdown.py
|
import re
def parse(markdown):
lines = markdown.split('\n')
res = ''
in_list = False
in_list_append = False
for i in lines:
if re.match('###### (.*)', i) is not None:
i = '<h6>' + i[7:] + '</h6>'
elif re.match('##### (.*)', i) is not None:
i = '<h5>' + i[6:] + '</h5>'
elif re.match('#### (.*)', i) is not None:
i = '<h4>' + i[5:] + '</h4>'
elif re.match('### (.*)', i) is not None:
i = '<h3>' + i[4:] + '</h3>'
elif re.match('## (.*)', i) is not None:
i = '<h2>' + i[3:] + '</h2>'
elif re.match('# (.*)', i) is not None:
i = '<h1>' + i[2:] + '</h1>'
m = re.match(r'\* (.*)', i)
if m:
if not in_list:
in_list = True
is_bold = False
is_italic = False
curr = m.group(1)
m1 = re.match('(.*)__(.*)__(.*)', curr)
if m1:
curr = m1.group(1) + '<strong>' + \
m1.group(2) + '</strong>' + m1.group(3)
is_bold = True
m1 = re.match('(.*)_(.*)_(.*)', curr)
if m1:
curr = m1.group(1) + '<em>' + m1.group(2) + \
'</em>' + m1.group(3)
is_italic = True
i = '<ul><li>' + curr + '</li>'
else:
is_bold = False
is_italic = False
curr = m.group(1)
m1 = re.match('(.*)__(.*)__(.*)', curr)
if m1:
is_bold = True
m1 = re.match('(.*)_(.*)_(.*)', curr)
if m1:
is_italic = True
if is_bold:
curr = m1.group(1) + '<strong>' + \
m1.group(2) + '</strong>' + m1.group(3)
if is_italic:
curr = m1.group(1) + '<em>' + m1.group(2) + \
'</em>' + m1.group(3)
i = '<li>' + curr + '</li>'
else:
if in_list:
in_list_append = True
in_list = False
m = re.match('<h|<ul|<p|<li', i)
if not m:
i = '<p>' + i + '</p>'
m = re.match('(.*)__(.*)__(.*)', i)
if m:
i = m.group(1) + '<strong>' + m.group(2) + '</strong>' + m.group(3)
m = re.match('(.*)_(.*)_(.*)', i)
if m:
i = m.group(1) + '<em>' + m.group(2) + '</em>' + m.group(3)
if in_list_append:
i = '</ul>' + i
in_list_append = False
res += i
if in_list:
res += '</ul>'
return res
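# Illustrative usage (not part of the original exercise solution):
#   parse('# Header')  -> '<h1>Header</h1>'
# Plain text lines are wrapped in <p> tags, '* ' lines become <ul>/<li> items,
# and __text__ / _text_ become <strong> / <em> respectively.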
|
0235f1167782cc328f44f056bf6362e789994654
|
ddd46f20859c48fd35136ffe775a4b5c7b549949
|
/code/chp12-scraping/selenium_form_submit.py
|
d3e9e5623241b2470403b89f13c9d45eb55e61c8
|
[] |
no_license
|
jackiekazil/data-wrangling
|
0a0200d5350dc686907d0abc472f5dce92ac5985
|
9d56a0c6cf8067246d9ee70246db4578fdd08354
|
refs/heads/master
| 2023-02-22T09:25:15.015542
| 2022-11-08T16:05:06
| 2022-11-08T16:05:06
| 40,521,573
| 583
| 651
| null | 2023-02-10T20:20:17
| 2015-08-11T04:36:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
selenium_form_submit.py
|
from selenium import webdriver
from time import sleep
browser = webdriver.Firefox()
browser.get('http://google.com')
inputs = browser.find_elements_by_css_selector('form input')
for i in inputs:
if i.is_displayed():
search_bar = i
break
search_bar.send_keys('web scraping with python')
search_button = browser.find_element_by_css_selector('form button')
search_button.click()
browser.implicitly_wait(10)
results = browser.find_elements_by_css_selector('div h3 a')
for r in results:
action = webdriver.ActionChains(browser)
action.move_to_element(r)
action.perform()
sleep(2)
browser.quit()
|
755c87fbd73ec97ac75e678ff62cd32aa1f3d886
|
99833651e4a6a0bc1221d577d9fc43b8568abedd
|
/nltk_contrib/misc/langid.py
|
2211a17bf35489dce107a86b4c7b031d93a15a32
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
nltk/nltk_contrib
|
689e2683aa01b120c7473b9a4fc50bc49f014390
|
95d1806e2f4e89e960b76a685b1fba2eaa7d5142
|
refs/heads/master
| 2023-07-31T13:32:47.358897
| 2022-11-21T18:49:33
| 2022-11-21T18:49:33
| 2,530,774
| 145
| 127
|
NOASSERTION
| 2022-11-21T18:49:34
| 2011-10-07T05:59:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
langid.py
|
"""
Sam Huston 2007
This is a simulation of the article:
"Evaluation of a language identification system for mono- and multilingual text documents"
by Artemenko, O; Mandl, T; Shramko, M; Womser-Hacker, C.
presented at: Applied Computing 2006, 21st Annual ACM Symposium on Applied Computing; 23-27 April 2006
This implementation is intended for monolingual documents only;
however, it is evaluated over a much larger range of languages.
Additionally three supervised methods of classification are explored:
Cosine distance, NaiveBayes, and Spearman-rho
"""
from nltk_contrib import classify
from nltk import detect
from nltk.corpus import udhr
import string
def run(classifier, training_data, gold_data):
classifier.train(training_data)
correct = 0
for lang in gold_data:
cls = classifier.get_class(gold_data[lang])
if cls == lang:
correct += 1
print correct, "in", len(gold_data), "correct"
# features: character bigrams
fd = detect.feature({"char-bigrams" : lambda t: [string.join(t)[n:n+2] for n in range(len(t)-1)]})
training_data = udhr.langs(['English-Latin1', 'French_Francais-Latin1', 'Indonesian-Latin1', 'Zapoteco-Latin1'])
gold_data = {}
for lang in training_data:
gold_data[lang] = training_data[lang][:50]
training_data[lang] = training_data[lang][100:200]
print "Cosine classifier: ",
run(classify.Cosine(fd), training_data, gold_data)
print "Naivebayes classifier: ",
run(classify.NaiveBayes(fd), training_data, gold_data)
print "Spearman classifier: ",
run(classify.Spearman(fd), training_data, gold_data)
|
3c5e57f85ee34f53625744a15de825f1e3e9041a
|
2d6323b8ccaf08a8929dba79fb9575c436977bd4
|
/docassemble_demo/docassemble/demo/fruit_database.py
|
efbb815b7e063e14ef55f0ebacf1e18651947345
|
[
"MIT"
] |
permissive
|
jhpyle/docassemble
|
f1c36e73d02807a7052b860dfceecdfa88e728c7
|
8726242cfbe3a15cad610dc2b518346be68ab142
|
refs/heads/master
| 2023-09-01T20:03:39.497473
| 2023-08-26T12:44:45
| 2023-08-26T12:44:45
| 34,148,903
| 691
| 300
|
MIT
| 2023-09-09T20:08:14
| 2015-04-18T02:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
fruit_database.py
|
import pandas
from docassemble.base.util import path_and_mimetype
__all__ = ['get_fruit_names', 'fruit_info']
fruit_info_by_name = {}
fruit_names = []
def read_data(filename):
the_xlsx_file, mimetype = path_and_mimetype(filename) # pylint: disable=unused-variable
df = pandas.read_excel(the_xlsx_file)
for indexno in df.index:
if not df['Name'][indexno]:
continue
fruit_names.append(df['Name'][indexno])
fruit_info_by_name[df['Name'][indexno]] = {"color": df['Color'][indexno], "seeds": df['Seeds'][indexno]}
def get_fruit_names():
return fruit_names
def fruit_info(fruit):
if fruit not in fruit_info_by_name:
raise Exception("Reference to invalid fruit " + fruit)
return fruit_info_by_name[fruit]
read_data('docassemble.demo:data/sources/fruit_data.xlsx')
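# Illustrative usage (return values are hypothetical; they depend on fruit_data.xlsx):
#   get_fruit_names()   -> e.g. ['apple', 'peach', ...]
#   fruit_info('apple') -> e.g. {'color': 'red', 'seeds': 'yes'}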
|
311c9025e49d1111702d194bb78fe4ca54bef031
|
8de1480d6511ac81c43ebb1fa50875adb1505c3b
|
/awx/main/tests/conftest.py
|
28565901b06a879f8e8554258d679c980ed371e6
|
[
"Apache-2.0"
] |
permissive
|
ansible/awx
|
bbbb0f3f43835a37fbb3eb3dcd7cfe98116fbbba
|
5e105c2cbd3fe828160540b3043cf6f605ed26be
|
refs/heads/devel
| 2023-08-31T11:45:01.446444
| 2023-08-31T04:58:57
| 2023-08-31T04:58:57
| 91,594,105
| 13,353
| 4,186
|
NOASSERTION
| 2023-09-14T20:20:07
| 2017-05-17T15:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,607
|
py
|
conftest.py
|
# Python
import pytest
from unittest import mock
from contextlib import contextmanager
from awx.main.models import Credential, UnifiedJob, Instance
from awx.main.tests.factories import (
create_organization,
create_job_template,
create_instance,
create_instance_group,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
from django.core.cache import cache
from django.conf import settings
def pytest_addoption(parser):
parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator")
def pytest_configure(config):
import sys
sys._called_from_test = True
def pytest_unconfigure(config):
import sys
del sys._called_from_test
@pytest.fixture
def mock_access():
@contextmanager
def access_given_class(TowerClass):
try:
mock_instance = mock.MagicMock(__name__='foobar')
MockAccess = mock.MagicMock(return_value=mock_instance)
the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False)
the_patch.__enter__()
yield mock_instance
finally:
the_patch.__exit__()
return access_given_class
@pytest.fixture
def job_template_factory():
return create_job_template
@pytest.fixture
def organization_factory():
return create_organization
@pytest.fixture
def notification_template_factory():
return create_notification_template
@pytest.fixture
def survey_spec_factory():
return create_survey_spec
@pytest.fixture
def instance_factory():
return create_instance
@pytest.fixture
def instance_group_factory():
return create_instance_group
@pytest.fixture
def controlplane_instance_group(instance_factory, instance_group_factory):
"""There always has to be a controlplane instancegroup and at least one instance in it"""
return create_instance_group(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, create_instance('hybrid-1', node_type='hybrid', capacity=500))
@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
return create_instance_group("default", instances=[create_instance("hostA", node_type='execution')])
@pytest.fixture
def control_instance():
'''Control instance in the controlplane automatic IG'''
inst = create_instance('control-1', node_type='control', capacity=500)
return inst
@pytest.fixture
def control_instance_low_capacity():
'''Control instance in the controlplane automatic IG that has low capacity'''
inst = create_instance('control-1', node_type='control', capacity=5)
return inst
@pytest.fixture
def execution_instance():
'''Execution node in the automatic default IG'''
ig = create_instance_group('default')
inst = create_instance('receptor-1', node_type='execution', capacity=500)
ig.instances.add(inst)
return inst
@pytest.fixture
def hybrid_instance():
'''Hybrid node in the default controlplane IG'''
inst = create_instance('hybrid-1', node_type='hybrid', capacity=500)
return inst
@pytest.fixture
def job_template_with_survey_passwords_factory(job_template_factory):
def rf(persisted):
"Returns job with linked JT survey with password survey questions"
objects = job_template_factory(
'jt',
organization='org1',
survey=[
{'variable': 'submitter_email', 'type': 'text', 'default': 'foobar@redhat.com'},
{'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'},
{'variable': 'SSN', 'type': 'password'},
],
persisted=persisted,
)
return objects.job_template
return rf
@pytest.fixture
def job_with_secret_key_unit(job_with_secret_key_factory):
return job_with_secret_key_factory(persisted=False)
@pytest.fixture
def workflow_job_template_factory():
return create_workflow_job_template
@pytest.fixture
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
return job_template_with_survey_passwords_factory(persisted=False)
@pytest.fixture
def mock_cache():
class MockCache(object):
cache = {}
def get(self, key, default=None):
return self.cache.get(key, default)
def set(self, key, value, timeout=60):
self.cache[key] = value
def delete(self, key):
del self.cache[key]
return MockCache()
def pytest_runtest_teardown(item, nextitem):
# clear Django cache at the end of every test ran
# NOTE: this should not be memcache (as it is deprecated), nor should it be redis.
# This is a local test cache, so we want every test to start with an empty cache
cache.clear()
@pytest.fixture(scope='session', autouse=True)
def mock_external_credential_input_sources():
# Credential objects query their related input sources on initialization.
# We mock that behavior out of credentials by default unless we need to
# test it explicitly.
with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_has_unpartitioned_events():
# has_unpartitioned_events determines if there are any events still
# left in the old, unpartitioned job events table. In order to work,
# this method looks up when the partition migration occurred. When
# Django's unit tests run, however, there will be no record of the migration.
# We mock this out to circumvent the migration query.
with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
"""
SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the
job_created field. That field does not actually exist in a non-partition scenario.
"""
def event_qs(self):
kwargs = {self.event_parent_key: self.id}
return self.event_class.objects.filter(**kwargs)
with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
yield _fixture
@pytest.fixture
def mock_me():
me_mock = mock.MagicMock(return_value=Instance(id=1, hostname=settings.CLUSTER_HOST_ID, uuid='00000000-0000-0000-0000-000000000000'))
with mock.patch.object(Instance.objects, 'me', me_mock):
yield
|
e2e7a50045556e59f438ab2eb9760f6ab3e26acf
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/fuchsia-gn-sdk/src/prepare_package_inputs.py
|
a1a25252ed4a5f164688d478bc1d9f5c38641867
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 12,497
|
py
|
prepare_package_inputs.py
|
#!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Derivative work of https://chromium.googlesource.com/chromium/src/+/HEAD/build/config/fuchsia/prepare_package_inputs.py
#
"""Creates a archive manifest used for Fuchsia package generation."""
import argparse
from fnmatch import fnmatch
import json
import os
import shutil
import subprocess
import sys
def make_package_path(file_path, roots):
"""Computes a path for |file_path| relative to one of the |roots|.
Args:
file_path: The file path to relativize.
roots: A list of directory paths which may serve as a relative root for
|file_path|.
For example:
    * make_package_path('/foo/bar.txt', ['/foo/']) -> 'bar.txt'
    * make_package_path('/foo/dir/bar.txt', ['/foo/']) -> 'dir/bar.txt'
    * make_package_path('/foo/out/Debug/bar.exe', ['/foo/', '/foo/out/Debug/']) -> 'bar.exe'
"""
# Prevents greedily matching against a shallow path when a deeper, better
# matching path exists.
roots.sort(key=len, reverse=True)
for next_root in roots:
if not next_root.endswith(os.sep):
next_root += os.sep
if file_path.startswith(next_root):
relative_path = file_path[len(next_root):]
return relative_path
return file_path
def _get_stripped_path(bin_path):
"""Finds the stripped version of |bin_path| in the build output directory.
returns |bin_path| if no stripped path is found.
"""
stripped_path = bin_path.replace('lib.unstripped/',
'lib/').replace('exe.unstripped/', '')
if os.path.exists(stripped_path):
return stripped_path
else:
return bin_path
def _is_binary(path):
"""Checks if the file at |path| is an ELF executable.
This is done by inspecting its FourCC header.
"""
with open(path, 'rb') as f:
file_tag = f.read(4)
return file_tag == b'\x7fELF'
def _write_build_ids_txt(readelf_exec, binary_paths, ids_txt_path):
"""Writes an index text file mapping build IDs to unstripped binaries."""
READELF_FILE_PREFIX = 'File: '
READELF_BUILD_ID_PREFIX = 'Build ID: '
# List of binaries whose build IDs are awaiting processing by readelf.
# Entries are removed as readelf's output is parsed.
unprocessed_binary_paths = set(binary_paths)
build_ids_map = {}
# Sanity check that unstripped binaries do not also have their stripped
# counterpart listed.
for binary_path in binary_paths:
stripped_binary_path = _get_stripped_path(binary_path)
if stripped_binary_path != binary_path:
unprocessed_binary_paths.discard(stripped_binary_path)
with open(ids_txt_path, 'w') as ids_file:
# Create a set to dedupe stripped binary paths in case both the stripped and
# unstripped versions of a binary are specified.
readelf_stdout = subprocess.check_output(
[readelf_exec, '-n'] +
sorted(unprocessed_binary_paths)).decode('utf8')
if len(binary_paths) == 1:
# Readelf won't report a binary's path if only one was provided to the
# tool.
binary_path = binary_paths[0]
else:
binary_path = None
for line in readelf_stdout.split('\n'):
line = line.strip()
if line.startswith(READELF_FILE_PREFIX):
binary_path = line[len(READELF_FILE_PREFIX):]
assert binary_path in unprocessed_binary_paths
elif line.startswith(READELF_BUILD_ID_PREFIX):
# Paths to the unstripped executables listed in "ids.txt" are specified
# as relative paths to that file.
unstripped_rel_path = os.path.relpath(
os.path.abspath(binary_path),
os.path.dirname(os.path.abspath(ids_txt_path)))
build_id = line[len(READELF_BUILD_ID_PREFIX):]
build_ids_map[build_id] = unstripped_rel_path
unprocessed_binary_paths.remove(binary_path)
for id_and_path in sorted(build_ids_map.items()):
ids_file.write(id_and_path[0] + ' ' + id_and_path[1] + '\n')
# Did readelf forget anything? Make sure that all binaries are accounted for.
assert not unprocessed_binary_paths
def _parse_component(component_info_file):
component_info = json.load(open(component_info_file, 'r'))
return component_info
def _get_component_manifests(component_info):
return [c for c in component_info if c.get('type') == 'manifest']
def _get_resource_items(component_info):
return [c for c in component_info if c.get('type') == 'resource']
def _get_expanded_files(runtime_deps_file):
""" Process the runtime deps file for file paths, recursively walking
directories as needed.
Returns a set of expanded files referenced by the runtime deps file.
"""
# runtime_deps may contain duplicate paths, so use a set for
# de-duplication.
expanded_files = set()
for next_path in open(runtime_deps_file, 'r'):
next_path = next_path.strip()
if os.path.isdir(next_path):
for root, _, files in os.walk(next_path):
for current_file in files:
expanded_files.add(
os.path.normpath(os.path.join(root, current_file)))
else:
expanded_files.add(os.path.normpath(next_path))
return expanded_files
def _write_gn_deps_file(
depfile_path, package_manifest, component_manifests, out_dir,
expanded_files):
with open(depfile_path, 'w') as depfile:
deps_list = [os.path.relpath(f, out_dir) for f in expanded_files]
deps_list.extend(component_manifests)
# The deps file is space-delimited, so filenames containing spaces
# must have them escaped.
deps_list = [f.replace(' ', '\\ ') for f in deps_list]
deps_string = ' '.join(sorted(deps_list))
depfile.write('%s: %s' % (package_manifest, deps_string))
def _write_meta_package_manifest(
manifest_entries, manifest_path, app_name, out_dir):
# Write meta/package manifest file and add to archive manifest.
meta_package = os.path.join(os.path.dirname(manifest_path), 'package')
with open(meta_package, 'w') as package_json:
json_payload = {'version': '0', 'name': app_name}
json.dump(json_payload, package_json)
package_json_filepath = os.path.relpath(package_json.name, out_dir)
manifest_entries['meta/package'] = package_json_filepath
def _write_component_manifest(
manifest_entries, component_info, manifest_path, out_dir):
"""Copy component manifest files and add to archive manifest.
    Raises an exception if a component uses an unknown manifest version.
"""
for component_manifest in _get_component_manifests(component_info):
manifest_source = component_manifest.get('source')
manifest_basename = os.path.basename(manifest_source)
if 'output_name' in component_manifest:
_, extension = os.path.splitext(manifest_basename)
manifest_basename = component_manifest.get('output_name') + \
extension
manifest_dest_file_path = os.path.join(
os.path.dirname(manifest_path), manifest_basename)
shutil.copy(manifest_source, manifest_dest_file_path)
manifest_entry_key = os.path.join('meta', manifest_basename)
manifest_entries[manifest_entry_key] = os.path.relpath(
manifest_dest_file_path, out_dir)
return manifest_dest_file_path
def _is_excluded(in_package_path, excluded_paths):
"""Returns true if |in_package_path| is filtered out by any entries of
|excluded_paths|."""
return any([fnmatch(in_package_path, excl) for excl in excluded_paths])
def _write_package_manifest(
manifest_entries, expanded_files, excluded_paths, out_dir, root_dir, component_info):
"""Writes the package manifest for a Fuchsia package
Returns a list of binaries in the package.
Raises an exception if excluded files are not found."""
gen_dir = os.path.normpath(os.path.join(out_dir, 'gen'))
roots = [gen_dir, root_dir, out_dir]
# Filter out component manifests. These are written out elsewhere.
excluded_paths.extend([
make_package_path(
os.path.relpath(cf.get('source'), out_dir), roots)
for cf in _get_component_manifests(component_info)
if os.path.relpath(cf.get('source'), out_dir) in expanded_files
])
# Write out resource files with specific package paths, and exclude them from
# the list of expanded files so they are not listed twice in the manifest.
for resource in _get_resource_items(component_info):
relative_src_file = os.path.relpath(resource.get('source'), out_dir)
resource_path = make_package_path(relative_src_file, roots)
manifest_entries[resource.get('dest')] = relative_src_file
excluded_paths.append(resource_path)
for current_file in expanded_files:
current_file = _get_stripped_path(current_file)
# make_package_path() may relativize to either the source root or
# output directory.
in_package_path = make_package_path(current_file, roots)
# Include the file if it isn't filtered out.
if not _is_excluded(in_package_path, excluded_paths):
manifest_entries[in_package_path] = current_file
def _build_manifest(args):
# Use a sorted list to make sure the manifest order is deterministic.
expanded_files = sorted(_get_expanded_files(args.runtime_deps_file))
component_info = _parse_component(args.json_file)
component_manifests = []
# Collect the manifest entries in a map since duplication happens
# because of runtime libraries.
manifest_entries = {}
_write_meta_package_manifest(
manifest_entries, args.manifest_path, args.app_name, args.out_dir)
for component_item in component_info:
_write_package_manifest(
manifest_entries, expanded_files, args.exclude_path, args.out_dir,
args.root_dir, component_item)
component_manifests.append(
_write_component_manifest(
manifest_entries, component_item, args.manifest_path,
args.out_dir))
with open(args.manifest_path, 'w') as manifest:
for key in sorted(manifest_entries.keys()):
manifest.write('%s=%s\n' % (key, manifest_entries[key]))
binaries = [f for f in expanded_files if _is_binary(f)]
_write_build_ids_txt(
args.readelf_exec, sorted(binaries), args.build_ids_file)
gen_dir = os.path.normpath(os.path.join(args.out_dir, 'gen'))
roots = [gen_dir, args.root_dir, args.out_dir]
# Don't include excluded files in the GN depfile.
expanded_files = [f for f in expanded_files if
not _is_excluded(make_package_path(f, roots), args.exclude_path)]
_write_gn_deps_file(
args.depfile_path, args.manifest_path, component_manifests,
args.out_dir, expanded_files)
return 0
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--root-dir', required=True, help='Build root directory')
parser.add_argument(
'--out-dir', required=True, help='Build output directory')
parser.add_argument('--app-name', required=True, help='Package name')
parser.add_argument(
'--runtime-deps-file',
required=True,
help='File with the list of runtime dependencies.')
parser.add_argument(
'--depfile-path', required=True, help='Path to write GN deps file.')
parser.add_argument(
'--exclude-path',
action='append',
default=[],
help='List of filter expressions for excluding files or directories.')
parser.add_argument(
'--manifest-path', required=True, help='Manifest output path.')
parser.add_argument(
'--build-ids-file', required=True, help='Debug symbol index path.')
parser.add_argument('--json-file', required=True)
parser.add_argument(
'--readelf-exec', default='readelf', help='readelf executable to use.')
args = parser.parse_args()
return _build_manifest(args)
if __name__ == '__main__':
sys.exit(main())
|
fed0ffe9ab1a1f40f3fb785db6a6ece9ed25aa42
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.Font.__init__.2.py
|
68d30a6818c7f33320bb5c3db74cafcc3683a991
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 147
|
py
|
wx.Font.__init__.2.py
|
# Create a font using the old-style constructor
font = wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, True)
|
0ae5dcb54d3c96d94b5cf015a5889d1522735443
|
f7a656e5676e90e6b70674be59907360a5ceaaa2
|
/docker-images/app/main.py
|
0abccfdd0f67e17f18cb45c3347917e579e47d9a
|
[
"MIT"
] |
permissive
|
tiangolo/uvicorn-gunicorn-docker
|
c54c342e1df477f3196729b46ad5506609aa4797
|
49d3538121828a20f22b609c8278246e94a71d82
|
refs/heads/master
| 2023-09-03T17:24:42.410151
| 2023-07-09T13:12:47
| 2023-07-09T13:12:47
| 164,100,644
| 564
| 291
|
MIT
| 2023-09-05T06:51:24
| 2019-01-04T11:44:13
|
Python
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
main.py
|
import sys
class App:
def __init__(self, scope):
assert scope["type"] == "http"
self.scope = scope
async def __call__(self, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
version = f"{sys.version_info.major}.{sys.version_info.minor}"
message = f"Hello world! From Uvicorn with Gunicorn. Using Python {version}".encode(
"utf-8"
)
await send({"type": "http.response.body", "body": message})
app = App
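# This module is normally served by Gunicorn managing Uvicorn workers; a roughly
# equivalent manual invocation (an assumption, not taken from this file) would be:
#   gunicorn -k uvicorn.workers.UvicornWorker main:app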
|
ef0a7c5cf367e725b59795e1b6c0fa1478a83600
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-jira/unit_tests/test_source.py
|
0abeb53115bbfa82d781be40e14b802df33a9ac1
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
test_source.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock
import responses
from source_jira.source import SourceJira
@responses.activate
def test_streams(config):
source = SourceJira()
streams = source.streams(config)
expected_streams_number = 51
assert len(streams) == expected_streams_number
@responses.activate
def test_check_connection(config, projects_response, labels_response):
responses.add(
responses.GET,
f"https://{config['domain']}/rest/api/3/project/search?maxResults=50&expand=description",
json=projects_response,
)
responses.add(
responses.GET,
f"https://{config['domain']}/rest/api/3/label?maxResults=50",
json=labels_response,
)
source = SourceJira()
logger_mock = MagicMock()
assert source.check_connection(logger=logger_mock, config=config) == (True, None)
def test_get_authenticator(config):
source = SourceJira()
authenticator = source.get_authenticator(config=config)
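    # 'ZW1haWxAZW1haWwuY29tOnRva2Vu' is the Base64 encoding of 'email@email.com:token',
    # i.e. the HTTP Basic auth credentials presumably built from the test config fixture.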
assert authenticator.get_auth_header() == {'Authorization': 'Basic ZW1haWxAZW1haWwuY29tOnRva2Vu'}
|
ff81c2fc86d433c70375980a41274ed0cd717c7d
|
05e634a232574f676434dfa8e4183f3d0a1a4bc9
|
/paddlecv/ppcv/ops/models/ocr/__init__.py
|
acb84341c2f526d0dec86229261ca8b97dc030ca
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/models
|
67ac00d93c5255ac64a9d80ae5be2e8927e47cee
|
8042c21b690ffc0162095e749a41b94dd38732da
|
refs/heads/release/2.4
| 2023-09-04T15:23:59.543625
| 2023-07-20T11:54:16
| 2023-07-20T11:54:16
| 88,868,842
| 7,633
| 3,597
|
Apache-2.0
| 2023-09-05T23:23:54
| 2017-04-20T13:30:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import ocr_db_detection
from . import ocr_crnn_recognition
from . import ocr_table_recognition
from . import ocr_kie
from .ocr_db_detection import *
from .ocr_crnn_recognition import *
from .ocr_table_recognition import *
from .ocr_kie import *
__all__ = ocr_db_detection.__all__
__all__ += ocr_crnn_recognition.__all__
__all__ += ocr_table_recognition.__all__
__all__ += ocr_kie.__all__
|
976bad63fe79aa3184435b7999e5382e94b02648
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/delegated_admin_relationship_operation.py
|
681ef839589a7410c71a753c293ace9210deae6c
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,868
|
py
|
delegated_admin_relationship_operation.py
|
from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .delegated_admin_relationship_operation_type import DelegatedAdminRelationshipOperationType
from .entity import Entity
from .long_running_operation_status import LongRunningOperationStatus
from .entity import Entity
@dataclass
class DelegatedAdminRelationshipOperation(Entity):
# The time in ISO 8601 format and in UTC time when the long-running operation was created. Read-only.
created_date_time: Optional[datetime.datetime] = None
# The data (payload) for the operation. Read-only.
data: Optional[str] = None
# The time in ISO 8601 format and in UTC time when the long-running operation was last modified. Read-only.
last_modified_date_time: Optional[datetime.datetime] = None
# The OdataType property
odata_type: Optional[str] = None
# The operationType property
operation_type: Optional[DelegatedAdminRelationshipOperationType] = None
# The status property
status: Optional[LongRunningOperationStatus] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DelegatedAdminRelationshipOperation:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: DelegatedAdminRelationshipOperation
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return DelegatedAdminRelationshipOperation()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from .delegated_admin_relationship_operation_type import DelegatedAdminRelationshipOperationType
from .entity import Entity
from .long_running_operation_status import LongRunningOperationStatus
from .delegated_admin_relationship_operation_type import DelegatedAdminRelationshipOperationType
from .entity import Entity
from .long_running_operation_status import LongRunningOperationStatus
fields: Dict[str, Callable[[Any], None]] = {
"createdDateTime": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),
"data": lambda n : setattr(self, 'data', n.get_str_value()),
"lastModifiedDateTime": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),
"operationType": lambda n : setattr(self, 'operation_type', n.get_enum_value(DelegatedAdminRelationshipOperationType)),
"status": lambda n : setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_datetime_value("createdDateTime", self.created_date_time)
writer.write_str_value("data", self.data)
writer.write_datetime_value("lastModifiedDateTime", self.last_modified_date_time)
writer.write_enum_value("operationType", self.operation_type)
writer.write_enum_value("status", self.status)
|
5b88c445feee4f592768769ff319ea991d24e9b9
|
d4558f1008f672686f2cb238ab5d86a2c2080b40
|
/OpenCV models/ImageHistogram.py
|
efd75a5801a91f2097ed6aa16258c342326679f6
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
amarlearning/Finger-Detection-and-Tracking
|
9e5f323f8cb49ad403877d4e7f6ab3cf640a531c
|
0bf6aef056a1850fe101317a72a57d2b4abb8bc9
|
refs/heads/master
| 2023-05-30T11:19:45.405599
| 2023-05-12T12:01:52
| 2023-05-12T12:01:52
| 135,921,262
| 341
| 163
|
MIT
| 2023-05-12T11:57:23
| 2018-06-03T16:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 998
|
py
|
ImageHistogram.py
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
def main():
image = cv2.imread("../data/4.1.03.tiff", 1)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
red_hist = cv2.calcHist([image_rgb], [0], None, [256], [0, 255])
green_hist = cv2.calcHist([image_rgb], [1], None, [256], [0, 255])
blue_hist = cv2.calcHist([image_rgb], [2], None, [256], [0, 255])
# Histogram using Matplotlib
plt.subplot(3, 1, 1)
plt.hist(image.ravel(), 256, [0, 255])
plt.xlim([0, 255])
plt.title("Image Histogram using Matplotlib")
# Histogram using Numpy
plt.subplot(3, 1, 2)
histogram, _ = np.histogram(image.ravel(), 256, [0, 255])
plt.plot(histogram, color='r')
plt.xlim([0, 255])
plt.title("Image Histogram using Numpy")
# Histogram using Numpy
plt.subplot(3, 1, 3)
plt.plot(red_hist, color='r')
plt.xlim([0, 255])
plt.title("Image Histogram using OpenCV")
plt.show()
if __name__ == '__main__':
main()
|
fea036349acdca61b65891ddcd8c462b2a0f738c
|
b305d7e8d309d963750ecfca62a4ea4b16d01b1f
|
/examples/pytest/test_commands.py
|
339829ee2202ed431f2e73c93b8d38e5e1a63a92
|
[
"MIT"
] |
permissive
|
JosXa/tgintegration
|
67440042039e67e70402cce01c018d354e841104
|
39dfc82eb5c80bb6845c12edf0c112e2fa7de26f
|
refs/heads/master
| 2023-07-21T17:08:09.425620
| 2022-12-01T18:38:50
| 2022-12-01T18:38:50
| 131,617,892
| 132
| 20
|
MIT
| 2023-07-20T11:37:36
| 2018-04-30T16:11:49
|
Python
|
UTF-8
|
Python
| false
| false
| 504
|
py
|
test_commands.py
|
import pytest
from tgintegration import BotController
from tgintegration import Response
@pytest.mark.asyncio
async def test_commands(controller: BotController):
# The BotController automatically loads the available commands and we test them all here
for c in controller.command_list:
async with controller.collect() as res: # type: Response
await controller.send_command(c.command)
assert not res.is_empty, "Bot did not respond to command /{}.".format(c.command)
|
2c426f555da3dae5915ad72fcba8a48c1789e0e2
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/haystack/pipelines/ray.py
|
891e276094703c6c1edef2a1e69b226be5612278
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 23,577
|
py
|
ray.py
|
from __future__ import annotations
import inspect
import logging
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union
from pathlib import Path
import networkx as nx
try:
from ray import serve
import ray
except:
ray = None # type: ignore
serve = None # type: ignore
from haystack.errors import PipelineError
from haystack.pipelines.config import (
get_component_definitions,
get_pipeline_definition,
read_pipeline_config_from_yaml,
validate_config,
)
from haystack.nodes.base import BaseComponent, RootNode
from haystack.pipelines.base import Pipeline
from haystack.schema import Document, MultiLabel
from haystack.telemetry import send_pipeline_event
logger = logging.getLogger(__name__)
class RayPipeline(Pipeline):
"""
[Ray](https://ray.io) is a framework for distributed computing.
With Ray, you can distribute a Pipeline's components across a cluster of machines. The individual components of a
Pipeline can be independently scaled. For instance, an extractive QA Pipeline deployment can have three replicas
of the Reader and a single replica for the Retriever. This way, you can use your resources more efficiently by horizontally scaling Components.
To set the number of replicas, add `num_replicas` in the YAML configuration for the node in a pipeline:
```yaml
components:
...
pipelines:
- name: ray_query_pipeline
type: RayPipeline
nodes:
- name: Retriever
inputs: [ Query ]
serve_deployment_kwargs:
num_replicas: 2 # number of replicas to create on the Ray cluster
```
A Ray Pipeline can only be created with a YAML Pipeline configuration.
```python
from haystack.pipeline import RayPipeline
pipeline = RayPipeline.load_from_yaml(path="my_pipelines.yaml", pipeline_name="my_query_pipeline")
pipeline.run(query="What is the capital of Germany?")
```
By default, RayPipelines create an instance of RayServe locally. To connect to an existing Ray instance,
set the `address` parameter when creating the RayPipeline instance.
YAML definitions of Ray pipelines are validated at load. For more information, see [YAML File Definitions](https://haystack-website-git-fork-fstau-dev-287-search-deepset-overnice.vercel.app/components/pipelines#yaml-file-definitions).
"""
def __init__(
self,
address: Optional[str] = None,
ray_args: Optional[Dict[str, Any]] = None,
serve_args: Optional[Dict[str, Any]] = None,
):
"""
:param address: The IP address for the Ray cluster. If set to `None`, a local Ray instance is started.
:param ray_args: Optional parameters for initializing Ray.
:param serve_args: Optional parameters for initializing Ray Serve.
"""
ray_args = ray_args or {}
if not ray.is_initialized():
ray.init(address=address, **ray_args)
else:
logger.warning("Ray was already initialized, so reusing that for this RayPipeline.")
self._serve_controller_client = serve.start(**serve_args)
super().__init__()
@classmethod
def load_from_config(
cls,
pipeline_config: Dict,
pipeline_name: Optional[str] = None,
overwrite_with_env_variables: bool = True,
strict_version_check: bool = False,
address: Optional[str] = None,
ray_args: Optional[Dict[str, Any]] = None,
serve_args: Optional[Dict[str, Any]] = None,
):
validate_config(pipeline_config, strict_version_check=strict_version_check, extras="ray")
pipeline_definition = get_pipeline_definition(pipeline_config=pipeline_config, pipeline_name=pipeline_name)
component_definitions = get_component_definitions(
pipeline_config=pipeline_config, overwrite_with_env_variables=overwrite_with_env_variables
)
pipeline = cls(address=address, ray_args=ray_args or {}, serve_args=serve_args or {})
for node_config in pipeline_definition["nodes"]:
if pipeline.root_node is None:
root_node = node_config["inputs"][0]
if root_node in ["Query", "File"]:
handle = cls._create_ray_deployment(component_name=root_node, pipeline_config=pipeline_config)
pipeline._add_ray_deployment_in_graph(handle=handle, name=root_node, outgoing_edges=1, inputs=[])
else:
raise KeyError(f"Root node '{root_node}' is invalid. Available options are 'Query' and 'File'.")
name = node_config["name"]
component_type = component_definitions[name]["type"]
component_class = BaseComponent.get_subclass(component_type)
serve_deployment_kwargs = next(node for node in pipeline_definition["nodes"] if node["name"] == name).get(
"serve_deployment_kwargs", {}
)
handle = cls._create_ray_deployment(
component_name=name, pipeline_config=pipeline_config, serve_deployment_kwargs=serve_deployment_kwargs
)
pipeline._add_ray_deployment_in_graph(
handle=handle,
name=name,
outgoing_edges=component_class.outgoing_edges,
inputs=node_config.get("inputs", []),
)
pipeline.update_config_hash()
return pipeline
@classmethod
def load_from_yaml( # type: ignore
cls,
path: Path,
pipeline_name: Optional[str] = None,
overwrite_with_env_variables: bool = True,
address: Optional[str] = None,
strict_version_check: bool = False,
ray_args: Optional[Dict[str, Any]] = None,
serve_args: Optional[Dict[str, Any]] = None,
):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
version: '1.0.0'
components: # define all the building-blocks for Pipeline
- name: MyReader # custom-name for the component; helpful for visualization & debugging
type: FARMReader # Haystack Class name for the component
params:
no_ans_boost: -10
model_name_or_path: deepset/roberta-base-squad2
- name: MyRetriever
type: BM25Retriever
params:
document_store: MyDocumentStore # params can reference other components defined in the YAML
custom_query: null
- name: MyDocumentStore
type: ElasticsearchDocumentStore
params:
index: haystack_test
pipelines: # multiple Pipelines can be defined using the components from above
- name: my_query_pipeline # a simple extractive-qa Pipeline
type: RayPipeline
nodes:
- name: MyRetriever
inputs: [Query]
serve_deployment_kwargs:
num_replicas: 2 # number of replicas to create on the Ray cluster
- name: MyReader
inputs: [MyRetriever]
```
Note that, in case of a mismatch in version between Haystack and the YAML, a warning will be printed.
If the pipeline loads correctly regardless, save again the pipeline using `RayPipeline.save_to_yaml()` to remove the warning.
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param address: The IP address for the Ray cluster. If set to None, a local Ray instance is started.
:param serve_args: Optional parameters for initializing Ray Serve.
"""
pipeline_config = read_pipeline_config_from_yaml(path)
return cls.load_from_config(
pipeline_config=pipeline_config,
pipeline_name=pipeline_name,
overwrite_with_env_variables=overwrite_with_env_variables,
strict_version_check=strict_version_check,
address=address,
ray_args=ray_args,
serve_args=serve_args,
)
@classmethod
def _create_ray_deployment(
cls, component_name: str, pipeline_config: dict, serve_deployment_kwargs: Optional[Dict[str, Any]] = None
):
"""
Create a Ray Deployment for the Component.
:param component_name: Class name of the Haystack Component.
:param pipeline_config: The Pipeline config YAML parsed as a dict.
:param serve_deployment_kwargs: An optional dictionary of arguments to be supplied to the
`ray.serve.deployment()` method, like `num_replicas`, `ray_actor_options`,
`max_concurrent_queries`, etc. See potential values in the
Ray Serve API docs (https://docs.ray.io/en/latest/serve/package-ref.html)
under the `ray.serve.deployment()` method
"""
if serve_deployment_kwargs is None:
serve_deployment_kwargs = {}
RayDeployment = serve.deployment(
_RayDeploymentWrapper, name=component_name, **serve_deployment_kwargs # type: ignore
)
RayDeployment.deploy(pipeline_config, component_name)
handle = RayDeployment.get_handle()
return handle
def add_node(self, component, name: str, inputs: List[str]):
raise NotImplementedError(
"The current implementation of RayPipeline only supports loading Pipelines from a YAML file."
)
def _add_ray_deployment_in_graph(self, handle, name: str, outgoing_edges: int, inputs: List[str]):
"""
Add the Ray deployment handle in the Pipeline Graph.
        :param handle: Ray deployment `handle` to add in the Pipeline Graph. The handle allows calling a Ray deployment
                       from Python: https://docs.ray.io/en/main/serve/package-ref.html#servehandle-api.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
of node is sufficient. For instance, a 'FilterRetriever' node would always output a single
edge with a list of documents. It can be represented as ["FilterRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.graph.add_node(name, component=handle, inputs=inputs, outgoing_edges=outgoing_edges)
if len(self.graph.nodes) == 2: # first node added; connect with Root
self.graph.add_edge(self.root_node, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
outgoing_edges_input_node = self.graph.nodes[input_node_name]["outgoing_edges"]
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["outgoing_edges"]
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
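    # Sketch of the two accepted `inputs` notations described above (node names are illustrative):
    #
    #   inputs=["MyRetriever"]                   # predecessor with a single outgoing edge
    #   inputs=["MyQueryClassifier.output_2"]    # explicit edge of a multi-output predecessor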
def _run_node(self, node_id: str, node_input: Dict[str, Any]) -> Tuple[Dict, str]:
return ray.get(self.graph.nodes[node_id]["component"].remote(**node_input))
async def _run_node_async(self, node_id: str, node_input: Dict[str, Any]) -> Tuple[Dict, str]:
# Async calling of Ray Deployments instead of using `ray.get()` as it is done
# in the sync version, in `_run_node()` above.
# See https://docs.ray.io/en/latest/ray-core/actors/async_api.html#objectrefs-as-asyncio-futures
return await self.graph.nodes[node_id]["component"].remote(**node_input)
def _get_run_node_signature(self, node_id: str):
return inspect.signature(self.graph.nodes[node_id]["component"].remote).parameters.keys()
# async version of the `Pipeline.run()` method
async def run_async( # type: ignore
self,
query: Optional[str] = None,
file_paths: Optional[List[str]] = None,
labels: Optional[MultiLabel] = None,
documents: Optional[List[Document]] = None,
meta: Optional[Union[dict, List[dict]]] = None,
params: Optional[dict] = None,
debug: Optional[bool] = None,
):
"""
Runs the Pipeline, one node at a time.
:param query: The search query (for query pipelines only).
:param file_paths: The files to index (for indexing pipelines only).
:param labels: Ground-truth labels that you can use to perform an isolated evaluation of pipelines. These labels are input to nodes in the pipeline.
:param documents: A list of Document objects to be processed by the Pipeline Nodes.
:param meta: Files' metadata. Used in indexing pipelines in combination with `file_paths`.
:param params: A dictionary of parameters that you want to pass to the nodes.
To pass a parameter to all Nodes, use: `{"top_k": 10}`.
To pass a parameter to targeted Nodes, run:
`{"Retriever": {"top_k": 10}, "Reader": {"top_k": 3, "debug": True}}`
:param debug: Specifies whether the Pipeline should instruct Nodes to collect debug information
about their execution. By default, this information includes the input parameters
the Nodes received and the output they generated. You can then find all debug information in the dictionary returned by this method under the key `_debug`.
"""
send_pipeline_event(
pipeline=self,
query=query,
file_paths=file_paths,
labels=labels,
documents=documents,
meta=meta,
params=params,
debug=debug,
)
# validate the node names
self._validate_node_names_in_params(params=params)
root_node = self.root_node
if not root_node:
raise PipelineError("Cannot run a pipeline with no nodes.")
node_output = None
queue: Dict[str, Any] = {
root_node: {"root_node": root_node, "params": params}
} # ordered dict with "node_id" -> "input" mapping that acts as a FIFO queue
if query is not None:
queue[root_node]["query"] = query
if file_paths:
queue[root_node]["file_paths"] = file_paths
if labels:
queue[root_node]["labels"] = labels
if documents:
queue[root_node]["documents"] = documents
if meta:
queue[root_node]["meta"] = meta
i = 0 # the first item is popped off the queue unless it is a "join" node with unprocessed predecessors
while queue:
node_id = list(queue.keys())[i]
node_input = queue[node_id]
node_input["node_id"] = node_id
# Apply debug attributes to the node input params
# NOTE: global debug attributes will override the value specified
# in each node's params dictionary.
if debug is None and node_input:
if node_input.get("params", {}):
debug = params.get("debug", None) # type: ignore
if debug is not None:
if not node_input.get("params", None):
node_input["params"] = {}
if node_id not in node_input["params"].keys():
node_input["params"][node_id] = {}
node_input["params"][node_id]["debug"] = debug
predecessors = set(nx.ancestors(self.graph, node_id))
if predecessors.isdisjoint(set(queue.keys())): # only execute if predecessor nodes are executed
try:
logger.debug("Running node '%s` with input: %s", node_id, node_input)
start = time()
node_output, stream_id = await self._run_node_async(node_id, node_input)
if "_debug" in node_output and node_id in node_output["_debug"]:
node_output["_debug"][node_id]["exec_time_ms"] = round((time() - start) * 1000, 2)
except Exception as e:
# The input might be a really large object with thousands of embeddings.
# If you really want to see it, raise the log level.
logger.debug("Exception while running node '%s' with input %s", node_id, node_input)
raise Exception(
f"Exception while running node '{node_id}': {e}\nEnable debug logging to see the data that was passed when the pipeline failed."
) from e
queue.pop(node_id)
#
if stream_id == "split":
for stream_id in [key for key in node_output.keys() if key.startswith("output_")]:
current_node_output = {k: v for k, v in node_output.items() if not k.startswith("output_")}
current_docs = node_output.pop(stream_id)
current_node_output["documents"] = current_docs
next_nodes = self.get_next_nodes(node_id, stream_id)
for n in next_nodes:
queue[n] = current_node_output
else:
next_nodes = self.get_next_nodes(node_id, stream_id)
for n in next_nodes: # add successor nodes with corresponding inputs to the queue
if queue.get(n): # concatenate inputs if it's a join node
existing_input = queue[n]
if "inputs" not in existing_input.keys():
updated_input: dict = {"inputs": [existing_input, node_output], "params": params}
if "_debug" in existing_input.keys() or "_debug" in node_output.keys():
updated_input["_debug"] = {
**existing_input.get("_debug", {}),
**node_output.get("_debug", {}),
}
if query:
updated_input["query"] = query
if file_paths:
updated_input["file_paths"] = file_paths
if labels:
updated_input["labels"] = labels
if documents:
updated_input["documents"] = documents
if meta:
updated_input["meta"] = meta
else:
existing_input["inputs"].append(node_output)
updated_input = existing_input
queue[n] = updated_input
else:
queue[n] = node_output
i = 0
else:
i += 1 # attempt executing next node in the queue as current `node_id` has unprocessed predecessors
# Disabled due to issue https://github.com/deepset-ai/haystack/issues/3970
# self.send_pipeline_event_if_needed(is_indexing=file_paths is not None)
return node_output
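    # Illustrative call of the async runner above (assumes the pipeline was loaded with
    # `RayPipeline.load_from_yaml`; the query and params are placeholders):
    #
    #   result = await pipeline.run_async(
    #       query="Who lives in Berlin?",
    #       params={"MyRetriever": {"top_k": 10}, "MyReader": {"top_k": 3}},
    #       debug=True,
    #   )
    #   print(result.get("answers"), result.get("_debug"))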
def send_pipeline_event(self, is_indexing: bool = False):
"""To avoid the RayPipeline serialization bug described at
https://github.com/deepset-ai/haystack/issues/3970"""
pass
class _RayDeploymentWrapper:
"""
Ray Serve supports calling of __init__ methods on the Classes to create "deployment" instances.
In case of Haystack, some Components like Retrievers have complex init methods that needs objects
like Document Stores.
This wrapper class encapsulates the initialization of Components. Given a Component Class
name, it creates an instance using the YAML Pipeline config.
"""
node: BaseComponent
def __init__(self, pipeline_config: dict, component_name: str):
"""
Create an instance of Component.
:param pipeline_config: Pipeline YAML parsed as a dict.
:param component_name: Component Class name.
"""
if component_name in ["Query", "File"]:
self.node = RootNode()
else:
self.node = self.load_from_pipeline_config(pipeline_config, component_name)
def __call__(self, *args, **kwargs):
"""
Ray calls this method which is then re-directed to the corresponding component's run().
"""
return self.node._dispatch_run(*args, **kwargs)
@staticmethod
def load_from_pipeline_config(pipeline_config: dict, component_name: str):
"""
Load an individual component from a YAML config for Pipelines.
:param pipeline_config: the Pipelines YAML config parsed as a dict.
:param component_name: the name of the component to load.
"""
all_component_configs = pipeline_config["components"]
all_component_names = [comp["name"] for comp in all_component_configs]
component_config = next(comp for comp in all_component_configs if comp["name"] == component_name)
component_params = component_config["params"]
for key, value in component_params.items():
if value in all_component_names: # check if the param value is a reference to another component
component_params[key] = _RayDeploymentWrapper.load_from_pipeline_config(pipeline_config, value)
component_instance = BaseComponent._create_instance(
component_type=component_config["type"], component_params=component_params, name=component_name
)
return component_instance
|
2d2756f516f6899bce1ece648ac6ab8c5d9451bd
|
a2a7506038b69541523f98f2f98613d731ad2acf
|
/intensity_normalization/cli/nyul.py
|
93726ec9a761c29874826cc456226c9148ff2a02
|
[
"Apache-2.0"
] |
permissive
|
jcreinhold/intensity-normalization
|
163e3ed26ea80acef1513e448b7fef78a2c6ddf2
|
ce53748bb6b6721acad031d3b98a2e8f9921e4c6
|
refs/heads/master
| 2023-06-10T03:25:17.287607
| 2023-05-31T21:03:26
| 2023-05-31T21:03:26
| 136,844,850
| 283
| 62
|
NOASSERTION
| 2022-02-25T22:59:54
| 2018-06-10T21:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
nyul.py
|
"""CLI for Nyul & Udupa normalization
Author: Jacob Reinhold <jcreinhold@gmail.com>
Created on: 13 Oct 2021
"""
__all__ = ["nyul_main", "nyul_parser"]
from intensity_normalization.normalize.nyul import NyulNormalize
# main functions and parsers for CLI
nyul_parser = NyulNormalize.parser()
nyul_main = NyulNormalize.main(nyul_parser)
|
6ede53e97cc1dfbf44d0e837d39530f357f0d5b5
|
0af82f2ffe99791edf241c8664a88d6e154936fd
|
/modules/life.py
|
ae872287d465d5496a5f8b2d3aa18fbdfa29fe08
|
[
"MIT"
] |
permissive
|
yoda-pa/yoda
|
4d47a219d9adf283e1a43a842c10a4ef1faeaf97
|
83616f28e5e69aac40ed54247091a2f93f8f9464
|
refs/heads/master
| 2023-04-13T16:50:17.693638
| 2023-04-10T04:03:27
| 2023-04-10T04:03:27
| 77,321,225
| 777
| 245
|
MIT
| 2023-08-30T14:49:33
| 2016-12-25T10:35:07
|
Python
|
UTF-8
|
Python
| false
| false
| 17,989
|
py
|
life.py
|
from __future__ import division
from __future__ import absolute_import
from builtins import input
from builtins import str
from past.utils import old_div
import json
import os.path
import time
import datetime
from Crypto.Cipher import AES
from .config import get_config_file_paths
from modules.setup import cypher_pass_generator
from .util import *
from .alias import alias_checker
# config file path
LIFE_CONFIG_FILE_PATH = get_config_file_paths()["LIFE_CONFIG_FILE_PATH"]
LIFE_CONFIG_FOLDER_PATH = get_folder_path_from_file_path(LIFE_CONFIG_FILE_PATH)
RLIST_PARAMS = ("title", "author", "kind", "tags")
create_folder(LIFE_CONFIG_FOLDER_PATH)
def is_in_params(params, query, article):
"""
    Check whether `query` matches a given field of a reading-list article (case-insensitive)
    :param params: name of the article field to search in
    :param query: keyword to look for
    :param article: reading-list entry to check
    :return: True if the query is found in the article's field
"""
query = query.lower()
article_filter = article[params]
if type(article_filter) is list:
article_filter = [item.lower() for item in article_filter]
else:
article_filter = article_filter.lower()
return query in article_filter
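# A quick illustration of the check above (values are made up):
#
#   article = {"title": "Dune", "author": "Frank Herbert", "kind": "book", "tags": ["Sci-Fi"]}
#   is_in_params("tags", "sci-fi", article)   # True (list field, case-insensitive)
#   is_in_params("title", "dune", article)    # True (string field, substring match)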
@click.group()
def life():
"""
Life command group:\n
contains helpful commands to organize your life
"""
def reading_list_entry_file_path():
"""
Get complete path of the file reading_list.yaml
:return: path
"""
return os.path.join(LIFE_CONFIG_FOLDER_PATH, "reading_list.yaml")
READING_LIST_ENTRY_FILE_PATH = reading_list_entry_file_path()
def empty_list_prompt():
"""
Empty list prompt
"""
click.echo(
"You reading list is empty. Add something to the list, do you want to? (Y/n)"
)
decision = get_input().lower()
if decision == "y" or not decision:
add_to_reading_list()
else:
click.echo("Using 'yoda rlist add', you can create later.")
def print_reading_list(reading_list_contents, only=RLIST_PARAMS):
"""
prints reading list
:param reading_list_contents:
:param only:
"""
for i, entry in enumerate(reading_list_contents["entries"]):
click.echo("-" + ("[" + str(i) + "]").ljust(24, "-"))
title = entry["title"]
author = entry["author"]
kind = entry["kind"]
tags = entry["tags"]
click.echo("Title: " + title) if title and "title" in only else None
click.echo("Author: " + author) if author and "author" in only else None
click.echo("Kind: " + kind) if kind and "kind" in only else None
click.echo("Tags: " + ", ".join(tags)) if tags and "tags" in only else None
click.echo("---END-OF-READING-LIST---")
def view_reading_list(opts):
"""
get the current reading list
:param opts:
"""
if os.path.isfile(READING_LIST_ENTRY_FILE_PATH):
with open(READING_LIST_ENTRY_FILE_PATH) as reading_list_entry:
file_contents = yaml.load(reading_list_entry)
file_contents = dict(file_contents)
last_updated = time.ctime(os.path.getmtime(READING_LIST_ENTRY_FILE_PATH))
query = opts[1]
params = opts[0]
search = ""
if query != "None":
search = "(filtered by " + params + ": " + query + ")"
filtered_contents = [
article
for article in file_contents["entries"]
if is_in_params(params, query, article)
]
file_contents = dict(entries=filtered_contents)
click.echo(chalk.blue("Your awesome reading list " + search))
click.echo(chalk.blue("Last updated: " + last_updated))
print_reading_list(file_contents)
else:
empty_list_prompt()
def add_to_reading_list(query=""):
"""
add anything to the reading list
:param query:
"""
click.echo(chalk.blue("Title of the article:"))
_title = get_input()
while len(_title) == 0:
click.echo(chalk.red("No title, cannot be."))
click.echo(chalk.blue("Title of the article:"))
_title = get_input()
click.echo(chalk.blue("Author of the article:"))
_author = get_input()
click.echo(
chalk.blue("Article type/kind/genre (e.g. book, article, blog, sci-fi):")
)
_kind = get_input()
click.echo(chalk.blue("Tags for easier filtering/searching (seperated by spaces):"))
_tags = get_input().split()
setup_data = dict(title=_title, author=_author, kind=_kind, tags=_tags)
if os.path.isfile(READING_LIST_ENTRY_FILE_PATH):
append_data_into_file(setup_data, READING_LIST_ENTRY_FILE_PATH)
else:
setup_data = dict(entries=[setup_data])
create_folder(os.path.join(LIFE_CONFIG_FOLDER_PATH, "rlist"))
input_data(setup_data, READING_LIST_ENTRY_FILE_PATH)
click.echo(chalk.blue("Added " + _title + " to your reading list!"))
# the rlist process
@life.command()
@click.argument("subcommand", nargs=1)
@click.option("--params", nargs=1, required=False, default="tags")
@click.argument("query", nargs=1, required=False)
def rlist(sub_command, params, query):
"""
Reading list for your daily life
yoda rlist [OPTIONS] SUBCOMMAND [QUERY]
ACTION:
view [--params="tags"] [query]: view your reading list
params: reading list parameter to be filtered (defaults to tags)
query: keyword to be searched
add: add something to your reading list
"""
sub_command = str(sub_command)
params = str(params)
query = str(query)
opts = (params, query) if params and query else ()
# print opts
sub_commands = {"view": view_reading_list, "add": add_to_reading_list}
try:
sub_commands[sub_command](opts)
except KeyError:
click.echo(chalk.red("Command " + sub_command + " does not exist!"))
click.echo("Try 'yoda rlist --help' for more info'")
# idea list operations
# config file path
IDEA_CONFIG_FILE_PATH = get_config_file_paths()["IDEA_CONFIG_FILE_PATH"]
CONFIG_FILE_PATH = get_config_file_paths()["USER_CONFIG_FILE_PATH"]
cipher_key = cypher_pass_generator()
cipher_IV456 = cypher_pass_generator()
setup_data = dict(
name="",
email="",
github=dict(username="", password=""),
encryption=dict(cipher_key=cipher_key, cipher_IV456=cipher_IV456),
)
if not os.path.exists(os.path.dirname(CONFIG_FILE_PATH)):
try:
os.makedirs(os.path.dirname(CONFIG_FILE_PATH))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
if not os.path.isfile(CONFIG_FILE_PATH):
with open(CONFIG_FILE_PATH, "w") as config_file:
yaml.dump(setup_data, config_file, default_flow_style=False)
config_file = open(CONFIG_FILE_PATH)
contents = yaml.load(config_file)
cipher_key = contents["encryption"]["cipher_key"]
cipher_IV456 = contents["encryption"]["cipher_IV456"]
def encryption(text):
"""
encryption function for saving ideas
:param text:
:return:
"""
return AES.new(cipher_key, AES.MODE_CBC, cipher_IV456).encrypt(text * 16)
def decryption(text):
"""
decryption function for saving ideas
:param text:
:return:
"""
s = AES.new(cipher_key, AES.MODE_CBC, cipher_IV456).decrypt(text)
return s[: old_div(len(s), 16)]
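# Rough sketch of how the two helpers above relate (illustrative round trip; keys come from the
# user's config file): `encryption` pads by repeating the plaintext 16 times before AES-CBC
# encryption, and `decryption` keeps only the first 1/16th of the decrypted data, so
#
#   secret = encryption("my idea text")
#   decryption(secret)                  # -> "my idea text" again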
def add_idea(project_name, task_name):
"""
a new entry created
:param project_name:
:param task_name:
"""
try:
with open(IDEA_CONFIG_FILE_PATH) as f:
data = f.read()
data = decryption(data)
data = json.loads(data)
f.close()
except:
data = None
if not isinstance(data, dict):
data = dict()
if project_name in data:
task = data[project_name]
else:
task = []
click.echo(chalk.blue("Brief desc of the current task : "))
desc = input()
task.append((task_name, desc)) # a new entry created
data[project_name] = task
with open(IDEA_CONFIG_FILE_PATH, "w") as f:
data = json.dumps(data)
data = encryption(data)
f.write(data)
f.close()
def show(project_name, task_name):
"""
all the saved entries are displayed
:param project_name:
:param task_name:
:return:
"""
try:
with open(IDEA_CONFIG_FILE_PATH) as f:
data = f.read()
data = decryption(data)
data = json.loads(data)
f.close()
except:
click.echo(
chalk.red(
'There are no saved ideas for now. Please run "yoda ideas add" to add a new idea'
)
)
return
for proj, task in list(data.items()):
click.echo(chalk.yellow(proj))
for _task_name, _task_description in task:
click.echo(chalk.cyan("\t" + _task_name))
click.echo(chalk.cyan("\t\t" + _task_description))
def remove(project, task=None):
"""
delete a whole entry or a sub-entry inside it
:param project:
:param task:
:return:
"""
try:
with open(IDEA_CONFIG_FILE_PATH) as f:
data = f.read()
data = decryption(data)
data = json.loads(data)
f.close()
except:
click.echo(chalk.red("File not exist, operation aborted."))
return
f.close()
try:
if task is None:
del data[project] # a project deleted
click.echo(chalk.blue("Project deleted successfully."))
else:
data[project] = [
x for x in data[project] if x[0] != task
] # task inside a respective project deleted
click.echo(chalk.blue("Task deleted successfully."))
with open(IDEA_CONFIG_FILE_PATH, "w") as f:
data = json.dumps(data)
data = encryption(data)
f.write(data)
f.close()
except:
click.echo(
chalk.red(
"Wrong task or project entered. Please check using 'yoda ideas show'"
)
)
# idea list process
@life.command()
@click.argument("subcommand", nargs=1)
@click.option("--task", nargs=1, required=False, default=None)
@click.option("--project", nargs=1, required=False, default=None)
@click.option("--inside", nargs=1, required=False, default=None)
def ideas(subcommand, task, project, inside):
"""
Keep track of your precious ideas.
yoda ideas SUBCOMMAND [OPTIONAL ARGUMENTS]
ACTION:
show : list out all the existing ideas
add : add a project or a task inside a project. You need to use either --project or --inside flag to
add a new project/task
remove : delete a task or a complete project. You need to use either --project or --inside flag to
remove a project/task
"""
if subcommand != "show" and (project or inside) is None:
click.echo(
chalk.red(
"Operation aborted. You have not selected any project or task. Please use this command with either "
"--project or --inside flag"
)
)
return
sub_commands = {"show": show, "add": add_idea, "remove": remove}
try:
sub_commands[subcommand]((project or inside), task)
except KeyError:
click.echo(chalk.red("Command " + subcommand + " does not exist."))
click.echo('Try "yoda ideas --help" for more info')
# ==========================================================================
# lease list operations
# config file path
LENDLIST_CONFIG_FILE_PATH = get_config_file_paths()["LEASELIST_CONFIG_FILE_PATH"]
LENDLIST_PARAMS = ("status", "item", "person", "enddate")
def empty_lending_list_prompt():
"""
Empty list prompt
"""
click.echo("Your lease list is empty. Add something to the list, do you want to? (y/n)")
decision = get_input().lower()
params = 0
if decision == "y" or not decision:
add_to_lending_list(params)
else:
click.echo("Using 'yoda leaselist add', you can create later.")
def print_lending_list(lending_list_contents, only=LENDLIST_PARAMS):
"""
    prints the lease list
    :param lending_list_contents: parsed lease-list entries
    :param only: fields to print
"""
for i, entry in enumerate(lending_list_contents["entries"]):
click.echo("-" + ("[" + str(i + 1) + "]").ljust(24, "-"))
status = entry["status"]
item = entry["item"]
person = entry["person"]
enddate = entry["enddate"]
click.echo("Status: " + status) if status and "status" in only else None
click.echo("Item: " + item) if item and "item" in only else None
click.echo("Who: " + person) if person and "person" in only else None
if enddate > datetime.date.today():
click.echo("End Date: " + str(enddate)) if enddate and "enddate" in only else None
else:
click.echo(chalk.red("End Date: " + str(enddate)) if enddate and "enddate" in only else None)
click.echo("---END-OF-LEASE-LIST---")
def show_lending_list(params):
"""
shows all items lent and borrowed
"""
if os.path.isfile(LENDLIST_CONFIG_FILE_PATH):
with open(LENDLIST_CONFIG_FILE_PATH) as reading_list_entry:
file_contents = yaml.load(reading_list_entry)
file_contents = dict(file_contents)
last_updated = time.ctime(os.path.getmtime(LENDLIST_CONFIG_FILE_PATH))
click.echo(chalk.blue("Last updated: " + last_updated))
print_lending_list(file_contents)
else:
empty_lending_list_prompt()
def add_to_lending_list(params):
"""
    add an item you have lent or borrowed to the lease list
"""
click.echo(chalk.blue("Did you lend or borrow this item? (l/b)"))
def add_to_list(_status, _item, _person, _enddate):
setup_data = dict(status=_status, item=_item, person=_person, enddate=_enddate)
if os.path.isfile(LENDLIST_CONFIG_FILE_PATH):
append_data_into_file(setup_data, LENDLIST_CONFIG_FILE_PATH)
else:
setup_data = dict(entries=[setup_data])
create_folder(os.path.join(LIFE_CONFIG_FOLDER_PATH, "lendlist"))
input_data(setup_data, LENDLIST_CONFIG_FILE_PATH)
click.echo(chalk.blue("Added " + _item + " to your lease list!"))
def status_check(_status):
if _status == "l":
_status = "Lent"
click.echo(chalk.blue("What item did you lend?"))
_item = get_input()
click.echo(chalk.blue("Who did you lend it to?"))
_person = get_input()
while True:
try:
click.echo(chalk.blue("When do you need it back? (dd/mm/yyyy)"))
date_entry = get_input()
day, month, year = map(int, date_entry.split('/'))
_enddate = datetime.date(year, month, day)
break
except:
click.echo(chalk.red("Invalid date input!"))
add_to_list(_status, _item, _person, _enddate)
elif _status == "b":
_status = "Borrowed"
click.echo(chalk.blue("What item did you borrow?"))
_item = get_input()
click.echo(chalk.blue("Who did you borrow it from?"))
_person = get_input()
while True:
try:
click.echo(chalk.blue("When do you need to give it back? (dd/mm/yy)"))
date_entry = get_input()
day, month, year = map(int, date_entry.split('/'))
_enddate = datetime.date(year, month, day)
break
except:
click.echo(chalk.red("Invalid date input!"))
add_to_list(_status, _item, _person, _enddate)
else:
click.echo(chalk.red("Input not recognised! Type l for lent and b for borrowed."))
_status = get_input().lower()
status_check(_status)
_status = get_input().lower()
status_check(_status)
def remove_from_lending_list(params):
"""
remove an item from your lease list
"""
if os.path.isfile(LENDLIST_CONFIG_FILE_PATH):
with open(LENDLIST_CONFIG_FILE_PATH) as reading_list_entry:
file_contents = yaml.load(reading_list_entry)
file_contents = dict(file_contents)
        while True:
            try:
                click.echo(chalk.blue("Enter your item's number. (Shown above it on the lease list)"))
                number = int(get_input())
                if 1 <= number <= len(file_contents["entries"]):
                    del file_contents["entries"][number - 1]
                    break  # leave the prompt loop once the item has been removed
                raise IndexError
            except (ValueError, IndexError):
                click.echo(chalk.red("That doesn't match any item. Try 'yoda leaselist show' to view your items."))
with open(LENDLIST_CONFIG_FILE_PATH, "w") as lease_entry:
yaml.dump(file_contents, lease_entry, default_flow_style=False)
click.echo(chalk.blue("Item successfully removed!"))
else:
empty_lending_list_prompt()
# leaselist process
@life.command()
@click.argument("subcommand", nargs=1)
@click.option("--params", nargs=1, required=False, default="tags")
def leaselist(subcommand, params):
"""
Keep track of items you have lent/borrowed.
ACTION:
show : lists all items you have lent or borrowed
add : add an item you have lent or borrowed
remove : remove an item from your lease list
"""
sub_commands = {"show": show_lending_list, "add": add_to_lending_list, "remove": remove_from_lending_list}
try:
sub_commands[subcommand](params)
except KeyError:
click.echo(chalk.red("Command " + subcommand + " does not exist!"))
click.echo("Try 'yoda leaselist --help' for more info'")
|
9fa3ca5e0d3888e3a818747ca52710df45a6ff54
|
807438e6974bf68762208ec24cf824dd0e5fabd6
|
/libcloud/common/aws.py
|
b575b66ccb5eb234302ce45537948efeeaffb55c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/libcloud
|
019c5bd839dedd2423f9604936886eaff252e04b
|
abba8c1719a8bda6db8efde2d46fd1b423ae4304
|
refs/heads/trunk
| 2023-08-31T20:14:22.369970
| 2023-08-21T20:17:57
| 2023-08-21T20:17:57
| 419,555
| 1,644
| 968
|
Apache-2.0
| 2023-09-13T19:34:44
| 2009-12-11T09:00:14
|
Python
|
UTF-8
|
Python
| false
| false
| 16,555
|
py
|
aws.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import time
import base64
import hashlib
from typing import Dict, Type, Optional
from hashlib import sha256
from datetime import datetime
from libcloud.utils.py3 import ET, b, httplib, urlquote, basestring, _real_unicode
from libcloud.utils.xml import findall_ignore_namespace, findtext_ignore_namespace
from libcloud.common.base import BaseDriver, XmlResponse, JsonResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, MalformedResponseError
try:
import simplejson as json
except ImportError:
import json # type: ignore
__all__ = [
"AWSBaseResponse",
"AWSGenericResponse",
"AWSTokenConnection",
"SignedAWSConnection",
"AWSRequestSignerAlgorithmV2",
"AWSRequestSignerAlgorithmV4",
"AWSDriver",
]
DEFAULT_SIGNATURE_VERSION = "2"
UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
PARAMS_NOT_STRING_ERROR_MSG = """
"params" dictionary contains an attribute "%s" which value (%s, %s) is not a
string.
Parameters are sent via query parameters and not via request body and as such,
all the values need to be of a simple type (string, int, bool).
For arrays and other complex types, you should use notation similar to this
one:
params['TagSpecification.1.Tag.Value'] = 'foo'
params['TagSpecification.2.Tag.Value'] = 'bar'
See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html
for details.
""".strip()
class AWSBaseResponse(XmlResponse):
namespace = None
def _parse_error_details(self, element):
"""
Parse code and message from the provided error element.
:return: ``tuple`` with two elements: (code, message)
:rtype: ``tuple``
"""
code = findtext_ignore_namespace(element=element, xpath="Code", namespace=self.namespace)
message = findtext_ignore_namespace(
element=element, xpath="Message", namespace=self.namespace
)
return code, message
class AWSGenericResponse(AWSBaseResponse):
# There are multiple error messages in AWS, but they all have an Error node
# with Code and Message child nodes. Xpath to select them
# None if the root node *is* the Error node
xpath = None
# This dict maps <Error><Code>CodeName</Code></Error> to a specific
# exception class that is raised immediately.
# If a custom exception class is not defined, errors are accumulated and
# returned from the parse_error method.
exceptions = {} # type: Dict[str, Type[Exception]]
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
context = self.connection.context
status = int(self.status)
# FIXME: Probably ditch this as the forbidden message will have
# corresponding XML.
if status == httplib.FORBIDDEN:
if not self.body:
raise InvalidCredsError(str(self.status) + ": " + self.error)
else:
raise InvalidCredsError(self.body)
try:
body = ET.XML(self.body)
except Exception:
raise MalformedResponseError(
"Failed to parse XML", body=self.body, driver=self.connection.driver
)
if self.xpath:
errs = findall_ignore_namespace(
element=body, xpath=self.xpath, namespace=self.namespace
)
else:
errs = [body]
msgs = []
for err in errs:
code, message = self._parse_error_details(element=err)
exceptionCls = self.exceptions.get(code, None)
if exceptionCls is None:
msgs.append("{}: {}".format(code, message))
continue
# Custom exception class is defined, immediately throw an exception
params = {}
if hasattr(exceptionCls, "kwargs"):
for key in exceptionCls.kwargs:
if key in context:
params[key] = context[key]
raise exceptionCls(value=message, driver=self.connection.driver, **params)
return "\n".join(msgs)
class AWSTokenConnection(ConnectionUserAndKey):
def __init__(
self,
user_id,
key,
secure=True,
host=None,
port=None,
url=None,
timeout=None,
proxy_url=None,
token=None,
retry_delay=None,
backoff=None,
):
self.token = token
super().__init__(
user_id,
key,
secure=secure,
host=host,
port=port,
url=url,
timeout=timeout,
retry_delay=retry_delay,
backoff=backoff,
proxy_url=proxy_url,
)
def add_default_params(self, params):
# Even though we are adding it to the headers, we need it here too
# so that the token is added to the signature.
if self.token:
params["x-amz-security-token"] = self.token
return super().add_default_params(params)
def add_default_headers(self, headers):
if self.token:
headers["x-amz-security-token"] = self.token
return super().add_default_headers(headers)
class AWSRequestSigner:
"""
Class which handles signing the outgoing AWS requests.
"""
def __init__(self, access_key, access_secret, version, connection):
"""
:param access_key: Access key.
:type access_key: ``str``
:param access_secret: Access secret.
:type access_secret: ``str``
:param version: API version.
:type version: ``str``
:param connection: Connection instance.
:type connection: :class:`Connection`
"""
self.access_key = access_key
self.access_secret = access_secret
self.version = version
        # TODO: Remove the cyclic dependency between connection and signer
self.connection = connection
def get_request_params(self, params, method="GET", path="/"):
return params
def get_request_headers(self, params, headers, method="GET", path="/", data=None):
return params, headers
class AWSRequestSignerAlgorithmV2(AWSRequestSigner):
def get_request_params(self, params, method="GET", path="/"):
params["SignatureVersion"] = "2"
params["SignatureMethod"] = "HmacSHA256"
params["AWSAccessKeyId"] = self.access_key
params["Version"] = self.version
params["Timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
params["Signature"] = self._get_aws_auth_param(
params=params, secret_key=self.access_secret, path=path
)
return params
def _get_aws_auth_param(self, params, secret_key, path="/"):
"""
Creates the signature required for AWS, per
http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
StringToSign = HTTPVerb + "\n" +
ValueOfHostHeaderInLowercase + "\n" +
HTTPRequestURI + "\n" +
CanonicalizedQueryString <from the preceding step>
"""
connection = self.connection
keys = list(params.keys())
keys.sort()
pairs = []
for key in keys:
value = str(params[key])
pairs.append(urlquote(key, safe="") + "=" + urlquote(value, safe="-_~"))
qs = "&".join(pairs)
hostname = connection.host
if (connection.secure and connection.port != 443) or (
not connection.secure and connection.port != 80
):
hostname += ":" + str(connection.port)
string_to_sign = "\n".join(("GET", hostname, path, qs))
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign), digestmod=sha256).digest()
)
return b64_hmac.decode("utf-8")
class AWSRequestSignerAlgorithmV4(AWSRequestSigner):
def get_request_params(self, params, method="GET", path="/"):
if method == "GET":
params["Version"] = self.version
return params
def get_request_headers(self, params, headers, method="GET", path="/", data=None):
now = datetime.utcnow()
headers["X-AMZ-Date"] = now.strftime("%Y%m%dT%H%M%SZ")
headers["X-AMZ-Content-SHA256"] = self._get_payload_hash(method, data)
headers["Authorization"] = self._get_authorization_v4_header(
params=params, headers=headers, dt=now, method=method, path=path, data=data
)
return params, headers
def _get_authorization_v4_header(self, params, headers, dt, method="GET", path="/", data=None):
credentials_scope = self._get_credential_scope(dt=dt)
signed_headers = self._get_signed_headers(headers=headers)
signature = self._get_signature(
params=params, headers=headers, dt=dt, method=method, path=path, data=data
)
return (
"AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, "
"SignedHeaders=%(sh)s, Signature=%(s)s"
% {
"u": self.access_key,
"c": credentials_scope,
"sh": signed_headers,
"s": signature,
}
)
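    # The resulting header has the standard SigV4 shape (credential scope and signature below
    # are illustrative):
    #
    #   Authorization: AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20230101/us-east-1/s3/aws4_request,
    #       SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<64 hex chars>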
def _get_signature(self, params, headers, dt, method, path, data):
key = self._get_key_to_sign_with(dt)
string_to_sign = self._get_string_to_sign(
params=params, headers=headers, dt=dt, method=method, path=path, data=data
)
return _sign(key=key, msg=string_to_sign, hex=True)
def _get_key_to_sign_with(self, dt):
return _sign(
_sign(
_sign(
_sign(("AWS4" + self.access_secret), dt.strftime("%Y%m%d")),
self.connection.driver.region_name,
),
self.connection.service_name,
),
"aws4_request",
)
def _get_string_to_sign(self, params, headers, dt, method, path, data):
canonical_request = self._get_canonical_request(
params=params, headers=headers, method=method, path=path, data=data
)
return "\n".join(
[
"AWS4-HMAC-SHA256",
dt.strftime("%Y%m%dT%H%M%SZ"),
self._get_credential_scope(dt),
_hash(canonical_request),
]
)
def _get_credential_scope(self, dt):
return "/".join(
[
dt.strftime("%Y%m%d"),
self.connection.driver.region_name,
self.connection.service_name,
"aws4_request",
]
)
def _get_signed_headers(self, headers):
return ";".join([k.lower() for k in sorted(headers.keys(), key=str.lower)])
def _get_canonical_headers(self, headers):
return (
"\n".join(
[
":".join([k.lower(), str(v).strip()])
for k, v in sorted(headers.items(), key=lambda k: k[0].lower())
]
)
+ "\n"
)
def _get_payload_hash(self, method, data=None):
if data is UnsignedPayloadSentinel:
return UNSIGNED_PAYLOAD
if method in ("POST", "PUT"):
if data:
if hasattr(data, "next") or hasattr(data, "__next__"):
# File upload; don't try to read the entire payload
return UNSIGNED_PAYLOAD
return _hash(data)
else:
return UNSIGNED_PAYLOAD
else:
return _hash("")
def _get_request_params(self, params):
# For self.method == GET
return "&".join(
[
"{}={}".format(urlquote(k, safe=""), urlquote(str(v), safe="~"))
for k, v in sorted(params.items())
]
)
def _get_canonical_request(self, params, headers, method, path, data):
return "\n".join(
[
method,
path,
self._get_request_params(params),
self._get_canonical_headers(headers),
self._get_signed_headers(headers),
self._get_payload_hash(method, data),
]
)
class UnsignedPayloadSentinel:
pass
class SignedAWSConnection(AWSTokenConnection):
version = None # type: Optional[str]
def __init__(
self,
user_id,
key,
secure=True,
host=None,
port=None,
url=None,
timeout=None,
proxy_url=None,
token=None,
retry_delay=None,
backoff=None,
signature_version=DEFAULT_SIGNATURE_VERSION,
):
super().__init__(
user_id=user_id,
key=key,
secure=secure,
host=host,
port=port,
url=url,
timeout=timeout,
token=token,
retry_delay=retry_delay,
backoff=backoff,
proxy_url=proxy_url,
)
self.signature_version = str(signature_version)
if self.signature_version == "2":
signer_cls = AWSRequestSignerAlgorithmV2
elif self.signature_version == "4":
signer_cls = AWSRequestSignerAlgorithmV4
else:
raise ValueError("Unsupported signature_version: %s" % (signature_version))
self.signer = signer_cls(
access_key=self.user_id,
access_secret=self.key,
version=self.version,
connection=self,
)
def add_default_params(self, params):
params = self.signer.get_request_params(params=params, method=self.method, path=self.action)
# Verify that params only contain simple types and no nested
# dictionaries.
# params are sent via query params so only strings are supported
for key, value in params.items():
if not isinstance(value, (_real_unicode, basestring, int, bool)):
msg = PARAMS_NOT_STRING_ERROR_MSG % (key, value, type(value))
raise ValueError(msg)
return params
def pre_connect_hook(self, params, headers):
params, headers = self.signer.get_request_headers(
params=params,
headers=headers,
method=self.method,
path=self.action,
data=self.data,
)
return params, headers
class AWSJsonResponse(JsonResponse):
"""
Amazon ECS response class.
ECS API uses JSON unlike the s3, elb drivers
"""
def parse_error(self):
response = json.loads(self.body)
code = response["__type"]
message = response.get("Message", response["message"])
return "{}: {}".format(code, message)
def _sign(key, msg, hex=False):
if hex:
return hmac.new(b(key), b(msg), hashlib.sha256).hexdigest()
else:
return hmac.new(b(key), b(msg), hashlib.sha256).digest()
def _hash(msg):
return hashlib.sha256(b(msg)).hexdigest()
class AWSDriver(BaseDriver):
def __init__(
self,
key,
secret=None,
secure=True,
host=None,
port=None,
api_version=None,
region=None,
token=None,
**kwargs,
):
self.token = token
super().__init__(
key,
secret=secret,
secure=secure,
host=host,
port=port,
api_version=api_version,
region=region,
token=token,
**kwargs,
)
def _ex_connection_class_kwargs(self):
kwargs = super()._ex_connection_class_kwargs()
kwargs["token"] = self.token
return kwargs
|
288f11c758be309fdb989a9839243638e0eb1d1b
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/roster/ansible.py
|
52c16cedb0d45e1bca1defbaa819f9ead99eff01
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,902
|
py
|
ansible.py
|
"""
Read in an Ansible inventory file or script.
Flat inventory files should be in the regular ansible inventory format.
.. code-block:: ini
# /tmp/example_roster
[servers]
salt.gtmanfred.com ansible_ssh_user=gtmanfred ansible_ssh_host=127.0.0.1 ansible_ssh_port=22 ansible_ssh_pass='password' ansible_sudo_pass='password'
[desktop]
home ansible_ssh_user=gtmanfred ansible_ssh_host=12.34.56.78 ansible_ssh_port=23 ansible_ssh_pass='password' ansible_sudo_pass='password'
[computers:children]
desktop
servers
[computers:vars]
http_port=80
then salt-ssh can be used to hit any of them
.. code-block:: bash
[~]# salt-ssh --roster=ansible --roster-file=/tmp/example_roster -N all test.ping
salt.gtmanfred.com:
True
home:
True
[~]# salt-ssh --roster=ansible --roster-file=/tmp/example_roster -N desktop test.ping
home:
True
[~]# salt-ssh --roster=ansible --roster-file=/tmp/example_roster -N computers test.ping
salt.gtmanfred.com:
True
home:
True
[~]# salt-ssh --roster=ansible --roster-file=/tmp/example_roster salt.gtmanfred.com test.ping
salt.gtmanfred.com:
True
There is also the option of specifying a dynamic inventory, and generating it on the fly
.. code-block:: bash
#!/bin/bash
# filename: /etc/salt/hosts
echo '{
"servers": [
"salt.gtmanfred.com"
],
"desktop": [
"home"
],
"computers": {
"hosts": [],
"children": [
"desktop",
"servers"
],
"vars": {
"http_port": 80
}
},
"_meta": {
"hostvars": {
"salt.gtmanfred.com": {
"ansible_ssh_user": "gtmanfred",
"ansible_ssh_host": "127.0.0.1",
"ansible_sudo_pass": "password",
"ansible_ssh_pass": "password",
"ansible_ssh_port": 22
},
"home": {
"ansible_ssh_user": "gtmanfred",
"ansible_ssh_host": "12.34.56.78",
"ansible_sudo_pass": "password",
"ansible_ssh_pass": "password",
"ansible_ssh_port": 23
}
}
}
}'
This is the format that an inventory script needs to output to work with ansible, and thus here.
.. code-block:: bash
[~]# salt-ssh --roster=ansible --roster-file /etc/salt/hosts salt.gtmanfred.com test.ping
salt.gtmanfred.com:
True
.. note::
A dynamic inventory script must have the executable bit set. In the above
example, ``chmod +x /etc/salt/hosts``.
Any of the [groups] or direct hostnames will return. The 'all' is special, and returns everything.
"""
import copy
import fnmatch
import salt.utils.ansible
import salt.utils.path
from salt.roster import get_roster_file
CONVERSION = {
"ansible_ssh_host": "host",
"ansible_ssh_port": "port",
"ansible_ssh_user": "user",
"ansible_ssh_pass": "passwd",
"ansible_sudo_pass": "sudo",
"ansible_ssh_private_key_file": "priv",
}
__virtualname__ = "ansible"
def __virtual__():
if salt.utils.path.which("ansible-inventory"):
return __virtualname__
else:
return False, "Install `ansible` to use inventory"
def targets(tgt, tgt_type="glob", **kwargs):
"""
Return the targets from the ansible inventory_file
Default: /etc/salt/roster
"""
__context__["inventory"] = salt.utils.ansible.targets(
inventory=get_roster_file(__opts__)
)
if tgt_type == "glob":
hosts = [
host for host in _get_hosts_from_group("all") if fnmatch.fnmatch(host, tgt)
]
elif tgt_type == "list":
hosts = [host for host in _get_hosts_from_group("all") if host in tgt]
elif tgt_type == "nodegroup":
hosts = _get_hosts_from_group(tgt)
else:
hosts = []
return {host: _get_hostvars(host) for host in hosts}
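# Illustrative calls of the roster entry point above (target names are placeholders):
#
#   targets("salt.*", tgt_type="glob")          # glob-match against every host in "all"
#   targets(["home"], tgt_type="list")          # exact host names
#   targets("computers", tgt_type="nodegroup")  # all hosts in an inventory group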
def _get_hosts_from_group(group):
inventory = __context__["inventory"]
if group not in inventory:
return []
hosts = [host for host in inventory[group].get("hosts", [])]
for child in inventory[group].get("children", []):
child_info = _get_hosts_from_group(child)
if child_info not in hosts:
hosts.extend(_get_hosts_from_group(child))
return hosts
def _get_hostvars(host):
hostvars = __context__["inventory"]["_meta"].get("hostvars", {}).get(host, {})
ret = copy.deepcopy(__opts__.get("roster_defaults", {}))
for value in CONVERSION:
if value in hostvars:
ret[CONVERSION[value]] = hostvars.pop(value)
ret["minion_opts"] = hostvars
if "host" not in ret:
ret["host"] = host
return ret
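# For instance (illustrative input), the ansible hostvars
#   {"ansible_ssh_host": "12.34.56.78", "ansible_ssh_port": 23, "ansible_ssh_user": "gtmanfred"}
# become the roster entry
#   {"host": "12.34.56.78", "port": 23, "user": "gtmanfred", "minion_opts": {}}
# with any keys not listed in CONVERSION left under "minion_opts".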
|
6c0b0e50b22fce8e53dd207ae0b3d2be280c0bdd
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowPlatformTcamAcl/cli/equal/golden_output_1_expected.py
|
3a7242b823ce309d5869f43ef9cb9c786119ed5c
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
golden_output_1_expected.py
|
expected_output = {
'index': {
'1152': {
'labels': {
'M:': {
'dstaddr': 00000000,
'l3err': 00,
'l3len': 0000,
'l3pro': 00,
'l3tos': 00,
'mtrid': 00,
'mvid': 000,
'sh': 0000,
'srcaddr': 00000000,
'vcu_results': 00000000,
'vrfid': 0000
},
'V:': {
'dstaddr': 00000000,
'l3err': 00,
'l3len': 0000,
'l3pro': 00,
'l3tos': 00,
'mtrid': 00,
'mvid': 000,
'sh': 0000,
'srcaddr': 00000000,
'vcu_results': 00000000,
'vrfid': 0000
},
},
'nat_dynamic_rule': 0,
'nat_result_rm': 1,
'nat_static_rule': 1
}
}
}
|
e17b99cd2aaf418d35985fd3ee476b3551469320
|
eced4731f9ad808ed47f037ebf5462b98376583f
|
/arelle/XhtmlValidate.py
|
f0ce874645b7cfe22e008bfb58faf7dbb9ae2b71
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"MIT"
] |
permissive
|
Arelle/Arelle
|
05afdec4eb5a13cdd981accdbd01b87bad72ea5e
|
2c522ed0ffe57e273d6596dd38bf62568d2eb4d8
|
refs/heads/master
| 2023-08-30T21:53:05.997184
| 2023-08-29T16:00:01
| 2023-08-29T16:00:01
| 1,866,915
| 395
| 253
|
NOASSERTION
| 2023-09-14T14:13:10
| 2011-06-08T18:33:19
|
Python
|
UTF-8
|
Python
| false
| false
| 6,462
|
py
|
XhtmlValidate.py
|
'''
See COPYRIGHT.md for copyright information.
(originally part of XmlValidate, moved to separate module)
'''
from arelle import XbrlConst, XmlUtil, XmlValidate, ValidateFilingText, UrlUtil
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelObject import ModelObject
from lxml import etree
import os, posixpath
htmlEltUriAttrs = { # attributes with URI content (for relative correction and %20 canonicalization)
"a": {"href"},
"area": {"href"},
"blockquote": {"cite"},
"del": {"cite"},
"form": {"action"},
"input": {"src", "usemap"},
"ins": {"cite"},
"img": {"src", "longdesc", "usemap"},
"object": ("codebase", "classid", "data", "archive", "usemap"), # codebase must be first to reolve others
"q": {"cite"},
}
ixSect = {
XbrlConst.ixbrl: {
"footnote": {"constraint": "ix10.5.1.1", "validation": "ix10.5.1.2"},
"fraction": {"constraint": "ix10.6.1.1", "validation": "ix10.6.1.2"},
"denominator": {"constraint": "ix10.6.1.1", "validation": "ix10.6.1.2"},
"numerator": {"constraint": "ix10.6.1.1", "validation": "ix10.6.1.2"},
"header": {"constraint": "ix10.7.1.1", "non-validatable": "ix10.7.1.2", "validation": "ix10.7.1.3"},
"hidden": {"constraint": "ix10.8.1.1", "validation": "ix10.8.1.2"},
"nonFraction": {"constraint": "ix10.9.1.1", "validation": "ix10.9.1.2"},
"nonNumeric": {"constraint": "ix10.10.1.1", "validation": "ix10.10.1.2"},
"references": {"constraint": "ix10.11.1.1", "validation": "ix10.11.1.2"},
"resources": {"constraint": "ix10.12.1.1", "validation": "ix10.12.1.2"},
"tuple": {"constraint": "ix10.13.1.1", "validation": "ix10.13.1.2"},
"other": {"constraint": "ix10", "validation": "ix10"}},
XbrlConst.ixbrl11: {
"continuation": {"constraint": "ix11.4.1.1", "validation": "ix11.4.1.2"},
"exclude": {"constraint": "ix11.5.1.1", "validation": "ix11.5.1.2"},
"footnote": {"constraint": "ix11.6.1.1", "validation": "ix11.6.1.2"},
"fraction": {"constraint": "ix11.7.1.2", "validation": "ix11.7.1.3"},
"denominator": {"constraint": "ix11.7.1.1", "validation": "ix11.7.1.3"},
"numerator": {"constraint": "ix11.7.1.1", "validation": "ix11.7.1.3"},
"header": {"constraint": "ix11.8.1.1", "non-validatable": "ix11.8.1.2", "validation": "ix11.8.1.3"},
"hidden": {"constraint": "ix11.9.1.1", "validation": "ix11.9.1.2"},
"nonFraction": {"constraint": "ix11.10.1.1", "validation": "ix11.10.1.2"},
"nonNumeric": {"constraint": "ix11.11.1.1", "validation": "ix11.11.1.2"},
"references": {"constraint": "ix11.12.1.1", "validation": "ix11.12.1.2"},
"relationship": {"constraint": "ix11.13.1.1", "validation": "ix11.13.1.2"},
"resources": {"constraint": "ix11.14.1.1", "validation": "ix11.14.1.2"},
"tuple": {"constraint": "ix11.15.1.1", "validation": "ix11.15.1.2"},
"other": {"constraint": "ix11", "validation": "ix11"}}
}
INLINE_1_0_SCHEMA = "http://www.xbrl.org/2008/inlineXBRL/xhtml-inlinexbrl-1_0.xsd"
INLINE_1_1_SCHEMA = "http://www.xbrl.org/2013/inlineXBRL/xhtml-inlinexbrl-1_1.xsd"
def ixMsgCode(codeName, elt=None, sect="constraint", ns=None, name=None) -> str:
if elt is None:
if ns is None: ns = XbrlConst.ixbrl11
if name is None: name = "other"
else:
if ns is None and elt.namespaceURI in XbrlConst.ixbrlAll:
ns = elt.namespaceURI
else:
ns = getattr(elt.modelDocument, "ixNS", XbrlConst.ixbrl11)
if name is None:
name = elt.localName
if name in ("context", "unit"):
name = "resources"
return "{}:{}".format(ixSect[ns].get(name,"other")[sect], codeName)
def xhtmlValidate(modelXbrl: ModelXbrl, elt: ModelObject) -> None:
from lxml.etree import XMLSyntaxError
validateEntryText = modelXbrl.modelManager.disclosureSystem.validateEntryText
if validateEntryText:
valHtmlContentMsgPrefix = modelXbrl.modelManager.disclosureSystem.validationType + ".5.02.05."
inlineSchema = INLINE_1_1_SCHEMA
if containsNamespacedElements(elt, XbrlConst.ixbrl) and not containsNamespacedElements(elt, XbrlConst.ixbrl11):
inlineSchema = INLINE_1_0_SCHEMA
XmlValidate.lxmlSchemaValidate(elt.modelDocument, inlineSchema)
# lxml bug: doesn't detect: class="" (min length 1)
for e in elt.getroottree().iterfind("//{http://www.w3.org/1999/xhtml}*[@class='']"):
modelXbrl.error("arelle:xhtmlClassError",
_("Attribute class must not be empty on element ix:%(element)s"),
modelObject=e, element=e.localName)
try:
if validateEntryText:
ValidateFilingText.validateHtmlContent(modelXbrl, elt, elt, "InlineXBRL", valHtmlContentMsgPrefix, isInline=True)
    except XMLSyntaxError as err:
        modelXbrl.error("html:syntaxError",
                        _("%(element)s error %(error)s"),
                        modelObject=elt, element=elt.localName.title(), error=str(err))
def containsNamespacedElements(elt: etree.ElementBase, namespace: str) -> bool:
return elt.getroottree().find("//ns:*", {"ns": namespace}) is not None
def resolveHtmlUri(elt, name, value):
if name == "archive": # URILIST
return " ".join(resolveHtmlUri(elt, "archiveListElement", v) for v in value.split(" "))
if not UrlUtil.isAbsolute(value):
if elt.localName == "object" and name in ("classid", "data", "archiveListElement") and elt.get("codebase"):
base = elt.get("codebase") + "/"
else:
base = getattr(elt.modelDocument, "htmlBase") # None if no htmlBase, empty string if it's not set
if base:
if value.startswith("/"): # add to authority
value = UrlUtil.authority(base) + value
elif value.startswith("#"): # add anchor to base document
value = base + value
else:
value = os.path.dirname(base) + "/" + value
# canonicalize ../ and ./
scheme, sep, pathpart = value.rpartition("://")
if sep:
pathpart = pathpart.replace('\\','/')
endingSep = '/' if pathpart[-1] == '/' else '' # normpath drops ending directory separator
_uri = scheme + "://" + posixpath.normpath(pathpart) + endingSep
else:
_uri = posixpath.normpath(value)
return _uri # .replace(" ", "%20") requirement for this is not yet clear
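# Example of the relative-URI handling above (paths are made up): with htmlBase
# "reports/2023/report.htm", a value of "img/logo.png" resolves to
# "reports/2023/img/logo.png", while "#summary" stays anchored to the base document
# ("reports/2023/report.htm#summary").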
|
85b32ec0f7030e95b68efc3f4655d1f94e8c2f91
|
0a8a4bfd6b4ffcfb7c99119c83cb3abe17c4a8f6
|
/openhtf/plugs/__init__.py
|
f3dfbc0be97a24045391961e2e802fa2e2523b73
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
google/openhtf
|
58c06e07508f9bb2079070a5ac03898fc68c1778
|
3a9a24987b2b34782fca55a8df8d007167dbb19a
|
refs/heads/master
| 2023-08-23T12:12:54.917649
| 2023-07-27T01:51:17
| 2023-07-27T01:51:43
| 41,519,483
| 471
| 253
|
Apache-2.0
| 2023-09-12T00:47:42
| 2015-08-28T01:14:17
|
Python
|
UTF-8
|
Python
| false
| false
| 14,633
|
py
|
__init__.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The plugs module provides managing plugs.
Test phases can be decorated as using Plug objects, which then get passed
into the test via parameters. Plugs are all instantiated at the
beginning of a test, and all plugs' tearDown() methods are called at the
end of a test. It's up to the Plug implementation to do any sort of
is-ready check.
"""
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Text, Tuple, Type, TypeVar, Union
import attr
from openhtf.core import base_plugs
from openhtf.core import phase_descriptor
from openhtf.util import configuration
from openhtf.util import data
from openhtf.util import threads
CONF = configuration.CONF
_LOG = logging.getLogger(__name__)
_BASE_PLUGS_LOG = base_plugs._LOG # pylint: disable=protected-access
CONF.declare(
'plug_teardown_timeout_s',
default_value=0,
description='Timeout (in seconds) for each plug tearDown function if > 0; '
'otherwise, will wait an unlimited time.')
# TODO(arsharma): Remove these aliases when users have moved to using the core
# library.
BasePlug = base_plugs.BasePlug
FrontendAwareBasePlug = base_plugs.FrontendAwareBasePlug
@attr.s(slots=True, frozen=True)
class PlugDescriptor(object):
mro = attr.ib(type=List[Text])
class PlugOverrideError(Exception):
"""Raised when a plug would be overridden by a kwarg."""
class DuplicatePlugError(Exception):
"""Raised when the same plug is required multiple times on a phase."""
def plug(
update_kwargs: bool = True,
**plugs_map: Union[Type[base_plugs.BasePlug], base_plugs.PlugPlaceholder]
) -> Callable[['phase_descriptor.PhaseT'], 'phase_descriptor.PhaseDescriptor']:
"""Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
base_plugs.InvalidPlugError: If a type is provided that is not a subclass of
BasePlug.
"""
for a_plug in plugs_map.values():
if not (isinstance(a_plug, base_plugs.PlugPlaceholder) or
issubclass(a_plug, base_plugs.BasePlug)):
raise base_plugs.InvalidPlugError(
'Plug %s is not a subclass of base_plugs.BasePlug nor a placeholder '
'for one' % a_plug)
def result(
func: 'phase_descriptor.PhaseT') -> 'phase_descriptor.PhaseDescriptor':
"""Wrap the given function and return the wrapper.
Args:
func: The function to wrap.
Returns:
A PhaseDescriptor that, when called will invoke the wrapped function,
passing plugs as keyword args.
Raises:
DuplicatePlugError: If a plug name is declared twice for the
same function.
"""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(func)
duplicates = (frozenset(p.name for p in phase.plugs) & frozenset(plugs_map))
if duplicates:
raise DuplicatePlugError('Plugs %s required multiple times on phase %s' %
(duplicates, func))
phase.plugs.extend([
base_plugs.PhasePlug(name, a_plug, update_kwargs=update_kwargs)
for name, a_plug in plugs_map.items()
])
return phase
return result
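# Minimal usage sketch for the decorator above (plug and phase names are made up):
#
#   class FlashPlug(base_plugs.BasePlug):
#       def flash(self, image):
#           self.logger.info('flashing %s', image)
#
#   @plug(flasher=FlashPlug)
#   def flash_phase(test, flasher):
#       flasher.flash('build.img')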
class _PlugTearDownThread(threads.KillableThread):
"""Killable thread that runs a plug's tearDown function."""
def __init__(self, a_plug: base_plugs.BasePlug, *args: Any, **kwargs: Any):
super(_PlugTearDownThread, self).__init__(*args, **kwargs)
self._plug = a_plug
def _thread_proc(self) -> None:
try:
self._plug.tearDown()
except Exception: # pylint: disable=broad-except
# Including the stack trace from ThreadTerminationErrors received when
# killed.
_LOG.warning(
'Exception calling tearDown on %s:', self._plug, exc_info=True)
PlugT = TypeVar('PlugT', bound=base_plugs.BasePlug)
class PlugManager(object):
"""Class to manage the lifetimes of plugs.
This class handles instantiation of plugs at test start and calling
tearDown() on all plugs when the test completes. It is used by
the executor, and should not be instantiated outside the framework itself.
Note this class is not thread-safe. It should only ever be used by the
main framework thread anyway.
Attributes:
_plug_types: Initial set of plug types, additional plug types may be passed
into calls to initialize_plugs().
_plugs_by_type: Dict mapping plug type to plug instance.
_plugs_by_name: Dict mapping plug name to plug instance.
_plug_descriptors: Dict mapping plug type to plug descriptor.
logger: logging.Logger instance that can save logs to the running test
record.
"""
def __init__(self,
plug_types: Optional[Set[Type[base_plugs.BasePlug]]] = None,
record_logger: Optional[logging.Logger] = None):
self._plug_types = plug_types or set()
for plug_type in self._plug_types:
if isinstance(plug_type, base_plugs.PlugPlaceholder):
raise base_plugs.InvalidPlugError(
'Plug {} is a placeholder, replace it using with_plugs().'.format(
plug_type))
self._plugs_by_type = {}
self._plugs_by_name = {}
self._plug_descriptors = {}
if not record_logger:
record_logger = _LOG
self.logger = record_logger.getChild('plug')
def as_base_types(self) -> Dict[Text, Any]:
return {
'plug_descriptors': {
name: attr.asdict(descriptor)
for name, descriptor in self._plug_descriptors.items()
},
'plug_states': {
name: data.convert_to_base_types(plug)
for name, plug in self._plugs_by_name.items()
},
}
def _make_plug_descriptor(
self, plug_type: Type[base_plugs.BasePlug]) -> PlugDescriptor:
"""Returns the plug descriptor, containing info about this plug type."""
return PlugDescriptor(self.get_plug_mro(plug_type))
def get_plug_mro(self, plug_type: Type[base_plugs.BasePlug]) -> List[Text]:
"""Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput']
Args:
plug_type: The plug class to get the MRO for.
"""
ignored_classes = (base_plugs.BasePlug, base_plugs.FrontendAwareBasePlug)
return [
self.get_plug_name(base_class) # pylint: disable=g-complex-comprehension
for base_class in plug_type.mro()
if (issubclass(base_class, base_plugs.BasePlug) and
base_class not in ignored_classes)
]
def get_plug_name(self, plug_type: Type[base_plugs.BasePlug]) -> Text:
"""Returns the plug's name, which is the class name and module.
For example:
'openhtf.plugs.user_input.UserInput'
Args:
plug_type: The plug class to get the name of.
"""
return '%s.%s' % (plug_type.__module__, plug_type.__name__)
def initialize_plugs(
self,
plug_types: Optional[Set[Type[base_plugs.BasePlug]]] = None) -> None:
"""Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed into the
constructor (this is used primarily for unit testing phases).
"""
types = plug_types if plug_types is not None else self._plug_types
for plug_type in types:
# Create a logger for this plug. All plug loggers go under the 'plug'
# sub-logger in the logger hierarchy.
plug_logger = self.logger.getChild(plug_type.__name__)
if plug_type in self._plugs_by_type:
continue
try:
if not issubclass(plug_type, base_plugs.BasePlug):
raise base_plugs.InvalidPlugError(
'Plug type "{}" is not an instance of base_plugs.BasePlug'.format(
plug_type))
if plug_type.logger != _BASE_PLUGS_LOG:
# They put a logger attribute on the class itself, overriding ours.
raise base_plugs.InvalidPlugError(
'Do not override "logger" in your plugs.', plug_type)
# Override the logger so that __init__'s logging goes into the record.
plug_type.logger = plug_logger
try:
plug_instance = plug_type()
finally:
# Now set it back since we'll give the instance a logger in a moment.
plug_type.logger = _BASE_PLUGS_LOG
# Set the logger attribute directly (rather than in base_plugs.BasePlug)
# so we don't depend on subclasses' implementation of __init__ to have
# it set.
if plug_instance.logger != _BASE_PLUGS_LOG:
raise base_plugs.InvalidPlugError(
'Do not set "self.logger" in __init__ in your plugs', plug_type)
else:
# Now the instance has its own copy of the test logger.
plug_instance.logger = plug_logger
except Exception: # pylint: disable=broad-except
plug_logger.exception('Exception instantiating plug type %s', plug_type)
self.tear_down_plugs()
raise
self.update_plug(plug_type, plug_instance)
def get_plug_by_class_path(self,
plug_name: Text) -> Optional[base_plugs.BasePlug]:
"""Get a plug instance by name (class path).
This provides a way for extensions to OpenHTF to access plug instances for
a running test via that test's plug manager.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
Returns:
The plug manager's instance of the specified plug.
"""
return self._plugs_by_name.get(plug_name)
def update_plug(self, plug_type: Type[PlugT], plug_value: PlugT) -> None:
"""Update internal data stores with the given plug value for plug type.
Safely tears down the old instance if one was already created, but that's
generally not the case outside unittests. Also, we explicitly pass the
plug_type rather than detecting it from plug_value to allow unittests to
override plugs with Mock instances.
Note this should only be used inside unittests, as this mechanism is not
compatible with RemotePlug support.
Args:
plug_type: The plug class to update.
plug_value: The plug class instance to store.
"""
self._plug_types.add(plug_type)
if plug_type in self._plugs_by_type:
self._plugs_by_type[plug_type].tearDown()
plug_name = self.get_plug_name(plug_type)
self._plugs_by_type[plug_type] = plug_value
self._plugs_by_name[plug_name] = plug_value
self._plug_descriptors[plug_name] = self._make_plug_descriptor(plug_type)
def provide_plugs(
self, plug_name_map: Iterable[Tuple[Text, Type[base_plugs.BasePlug]]]
) -> Dict[Text, base_plugs.BasePlug]:
"""Provide the requested plugs [(name, type),] as {name: plug instance}."""
return {name: self._plugs_by_type[cls] for name, cls in plug_name_map}
def tear_down_plugs(self) -> None:
"""Call tearDown() on all instantiated plugs.
Note that initialize_plugs must have been called before calling
this method, and initialize_plugs must be called again after calling
this method if you want to access the plugs attribute again.
Any exceptions in tearDown() methods are logged, but do not get raised
by this method.
"""
_LOG.debug('Tearing down all plugs.')
for plug_type, plug_instance in self._plugs_by_type.items():
if plug_instance.uses_base_tear_down():
name = '<PlugTearDownThread: BasePlug No-Op for %s>' % plug_type
else:
name = '<PlugTearDownThread: %s>' % plug_type
thread = _PlugTearDownThread(plug_instance, name=name)
thread.start()
timeout_s = (
CONF.plug_teardown_timeout_s
if CONF.plug_teardown_timeout_s else None)
thread.join(timeout_s)
if thread.is_alive():
thread.kill()
_LOG.warning('Killed tearDown for plug %s after timeout.',
plug_instance)
self._plugs_by_type.clear()
self._plugs_by_name.clear()
def wait_for_plug_update(
self, plug_name: Text, remote_state: Dict[Text, Any],
timeout_s: Union[int, float]) -> Optional[Dict[Text, Any]]:
"""Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
base_plugs.InvalidPlugError: The plug can't be waited on either because
it's not in use or it's not a frontend-aware plug.
"""
plug_instance = self._plugs_by_name.get(plug_name)
if plug_instance is None:
raise base_plugs.InvalidPlugError(
'Cannot wait on unknown plug "{}".'.format(plug_name))
if not isinstance(plug_instance, base_plugs.FrontendAwareBasePlug):
raise base_plugs.InvalidPlugError(
          'Cannot wait on a plug {} that is not a subclass '
'of FrontendAwareBasePlug.'.format(plug_name))
state, update_event = plug_instance.asdict_with_event()
if state != remote_state:
return state
if update_event.wait(timeout_s):
return plug_instance._asdict()
def get_frontend_aware_plug_names(self) -> List[Text]:
"""Returns the names of frontend-aware plugs."""
return [
name for name, plug in self._plugs_by_name.items()
if isinstance(plug, base_plugs.FrontendAwareBasePlug)
]
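# Illustrative sketch (not part of the original module): the PlugManager
# lifecycle with a hypothetical plug class. ``ExamplePlug`` is a made-up name
# used only for illustration.
if __name__ == '__main__':
  class ExamplePlug(base_plugs.BasePlug):
    """Hypothetical plug used only for this sketch."""
    def do_work(self) -> None:
      self.logger.info('doing work')
    def tearDown(self) -> None:
      self.logger.info('cleaning up')
  manager = PlugManager(plug_types={ExamplePlug})
  manager.initialize_plugs()
  provided = manager.provide_plugs([('example', ExamplePlug)])
  provided['example'].do_work()
  manager.tear_down_plugs()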
|
2ebb7e852fdd28955e5da4ec5fd58949e64ef5d7
|
c899cccda9a8cd73a1942a4f8b897e6d5347a246
|
/tests/cli/test_shell.py
|
c95d918c9198c68dff1d6438cf8bd8f2f5962012
|
[
"MIT"
] |
permissive
|
tmux-python/tmuxp
|
f2a03749058750e0c378f4179e844d702be182d7
|
e0880e7574fab66c8be5a1fb521938733d3056f4
|
refs/heads/master
| 2023-09-04T12:31:16.099042
| 2023-09-04T12:13:40
| 2023-09-04T12:13:40
| 12,398,170
| 2,072
| 144
|
MIT
| 2023-09-02T10:32:11
| 2013-08-27T05:51:11
|
Python
|
UTF-8
|
Python
| false
| false
| 7,301
|
py
|
test_shell.py
|
import contextlib
import io
import pathlib
import subprocess
import typing as t
import pytest
from libtmux.server import Server
from libtmux.session import Session
from tmuxp import cli, exc
@pytest.mark.parametrize("cli_cmd", [["shell"], ["shell", "--pdb"]])
@pytest.mark.parametrize(
"cli_args,inputs,env,expected_output",
[
(
["-L{SOCKET_NAME}", "-c", "print(str(server.socket_name))"],
[],
{},
"{SERVER_SOCKET_NAME}",
),
(
[
"-L{SOCKET_NAME}",
"{SESSION_NAME}",
"-c",
"print(session.name)",
],
[],
{},
"{SESSION_NAME}",
),
(
[
"-L{SOCKET_NAME}",
"{SESSION_NAME}",
"{WINDOW_NAME}",
"-c",
"print(server.has_session(session.name))",
],
[],
{},
"True",
),
(
[
"-L{SOCKET_NAME}",
"{SESSION_NAME}",
"{WINDOW_NAME}",
"-c",
"print(window.name)",
],
[],
{},
"{WINDOW_NAME}",
),
(
[
"-L{SOCKET_NAME}",
"{SESSION_NAME}",
"{WINDOW_NAME}",
"-c",
"print(pane.id)",
],
[],
{},
"{PANE_ID}",
),
(
[
"-L{SOCKET_NAME}",
"-c",
"print(pane.id)",
],
[],
{"TMUX_PANE": "{PANE_ID}"},
"{PANE_ID}",
),
],
)
def test_shell(
cli_cmd: t.List[str],
cli_args: t.List[str],
inputs: t.List[t.Any],
expected_output: str,
env: t.Dict[str, str],
server: "Server",
session: Session,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture,
) -> None:
monkeypatch.setenv("HOME", str(tmp_path))
window_name = "my_window"
window = session.new_window(window_name=window_name)
window.split_window()
assert window.attached_pane is not None
template_ctx = {
"SOCKET_NAME": server.socket_name,
"SESSION_NAME": session.name,
"WINDOW_NAME": window_name,
"PANE_ID": window.attached_pane.id,
"SERVER_SOCKET_NAME": server.socket_name,
}
cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]
for k, v in env.items():
monkeypatch.setenv(k, v.format(**template_ctx))
monkeypatch.chdir(tmp_path)
cli.cli(cli_args)
result = capsys.readouterr()
assert expected_output.format(**template_ctx) in result.out
@pytest.mark.parametrize(
"cli_cmd",
[
["shell"],
["shell", "--pdb"],
],
)
@pytest.mark.parametrize(
"cli_args,inputs,env,template_ctx,exception,message",
[
(
["-LDoesNotExist", "-c", "print(str(server.socket_name))"],
[],
{},
{},
subprocess.CalledProcessError,
r".*DoesNotExist.*",
),
(
[
"-L{SOCKET_NAME}",
"nonexistent_session",
"-c",
"print(str(server.socket_name))",
],
[],
{},
{"session_name": "nonexistent_session"},
exc.TmuxpException,
"Session not found: nonexistent_session",
),
(
[
"-L{SOCKET_NAME}",
"{SESSION_NAME}",
"nonexistent_window",
"-c",
"print(str(server.socket_name))",
],
[],
{},
{"window_name": "nonexistent_window"},
exc.TmuxpException,
"Window not found: {WINDOW_NAME}",
),
],
)
def test_shell_target_missing(
cli_cmd: t.List[str],
cli_args: t.List[str],
inputs: t.List[t.Any],
env: t.Dict[t.Any, t.Any],
template_ctx: t.Dict[str, str],
exception: t.Union[
t.Type[exc.TmuxpException], t.Type[subprocess.CalledProcessError]
],
message: str,
socket_name: str,
server: "Server",
session: Session,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture,
) -> None:
monkeypatch.setenv("HOME", str(tmp_path))
window_name = "my_window"
window = session.new_window(window_name=window_name)
window.split_window()
assert server.socket_name is not None
assert session.name is not None
template_ctx.update(
{
"SOCKET_NAME": server.socket_name,
"SESSION_NAME": session.name,
"WINDOW_NAME": template_ctx.get("window_name", window_name),
}
)
cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]
for k, v in env.items():
monkeypatch.setenv(k, v.format(**template_ctx))
monkeypatch.chdir(tmp_path)
if exception is not None:
with pytest.raises(exception, match=message.format(**template_ctx)):
cli.cli(cli_args)
else:
cli.cli(cli_args)
result = capsys.readouterr()
assert message.format(**template_ctx) in result.out
@pytest.mark.parametrize(
"cli_cmd",
[
# ['shell'],
        # ['shell', '--pdb'],
["shell", "--code"],
# ['shell', '--bpython'],
# ['shell', '--ptipython'],
# ['shell', '--ptpython'],
# ['shell', '--ipython'],
],
)
@pytest.mark.parametrize(
"cli_args,inputs,env,message",
[
(
[
"-L{SOCKET_NAME}",
],
[],
{},
"(InteractiveConsole)",
),
(
[
"-L{SOCKET_NAME}",
],
[],
{"PANE_ID": "{PANE_ID}"},
"(InteractiveConsole)",
),
],
)
def test_shell_interactive(
cli_cmd: t.List[str],
cli_args: t.List[str],
inputs: t.List[t.Any],
env: t.Dict[str, str],
message: str,
server: "Server",
session: Session,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture,
) -> None:
monkeypatch.setenv("HOME", str(tmp_path))
window_name = "my_window"
window = session.new_window(window_name=window_name)
window.split_window()
assert window.attached_pane is not None
template_ctx = {
"SOCKET_NAME": server.socket_name,
"SESSION_NAME": session.name,
"WINDOW_NAME": window_name,
"PANE_ID": window.attached_pane.id,
"SERVER_SOCKET_NAME": server.socket_name,
}
cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]
for k, v in env.items():
monkeypatch.setenv(k, v.format(**template_ctx))
monkeypatch.chdir(tmp_path)
monkeypatch.setattr("sys.stdin", io.StringIO("exit()\r"))
with contextlib.suppress(SystemExit):
cli.cli(cli_args)
result = capsys.readouterr()
assert message.format(**template_ctx) in result.err
|
7d4346c7c9722be0b41cd9059a674f7a0a239d0a
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/config/TestConfigFile.py
|
44555049428693ec178b13920c2724eabb155753
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
TestConfigFile.py
|
import unittest
import configargparse
from checkov.common.util.ext_argument_parser import ExtArgumentParser
class TestConfigFile(unittest.TestCase):
def test_pass(self):
argv = ['--ca-certificate', '----- BEGIN CERTIFICATE ----- <KEY> ----- END CERTIFICATE -----',
'--compact', '--directory', 'test-dir', '--docker-image', 'sample-image', '--dockerfile-path',
'Dockerfile', '--download-external-modules', 'True', '--evaluate-variables', 'False',
'--external-checks-dir', 'sample-dir', '--external-checks-git', 'sample-github-url', '--file',
'sample.tf', '--framework', 'all', '--no-guide', '--output', 'cli', '--quiet', '--repo-id',
'bridgecrew/sample-repo', '--skip-check', 'CKV_DOCKER_3,CKV_DOCKER_2', '--skip-fixes',
'--skip-framework', 'dockerfile', '--skip-suppressions', '--soft-fail', '--branch', 'master',
'--check', 'CKV_DOCKER_1']
argv_parser = ExtArgumentParser(config_file_parser_class=configargparse.YAMLConfigFileParser)
config_parser = ExtArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
default_config_files=['example_TestConfigFile/config.yml'],
)
argv_parser.add_parser_args()
config_parser.add_parser_args()
config_from_argv = argv_parser.parse_args(argv)
config_from_file = config_parser.parse_args([])
self.assertEqual(config_from_argv, config_from_file)
if __name__ == '__main__':
unittest.main()
|
6bb230de8d562da7e56133f7dd7fb6476f550ee7
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/html_parsing/wikipedia/get_seasons_anime_Dorohedoro.py
|
091db02c3f79ee3ec04d30ef09601543542923b8
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 417
|
py
|
get_seasons_anime_Dorohedoro.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import re
import requests
def get_seasons() -> list[str]:
rs = requests.get("https://en.wikipedia.org/wiki/List_of_Dorohedoro_episodes")
rs.raise_for_status()
items = re.findall(r"Season \w+", rs.text, flags=re.IGNORECASE)
return sorted(set(items))
if __name__ == "__main__":
print(get_seasons())
# ['Season One']
|
d994a0438c8fc84f302ceb983db65550826247e6
|
7af0ff378525ef6132f74bac0b1eb54ce4c40c08
|
/indico/modules/users/api.py
|
f3b8f9d2c5da1eb5056ada0b2166e0850df5ab2f
|
[
"MIT"
] |
permissive
|
indico/indico
|
1126ee0ac3e9d36510a64989ce71be9c02680831
|
463951511d3a8409f944f98f29875c4323f3e897
|
refs/heads/master
| 2023-08-31T11:15:00.092526
| 2023-08-30T11:07:25
| 2023-08-30T11:07:25
| 2,113,067
| 1,549
| 429
|
MIT
| 2023-09-13T20:09:56
| 2011-07-27T13:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
api.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import jsonify, session
from indico.modules.users import User
from indico.web.http_api.hooks.base import HTTPAPIHook
from indico.web.http_api.responses import HTTPAPIError
from indico.web.rh import RH, oauth_scope
@oauth_scope('read:user')
class RHUserAPI(RH):
def _process(self):
user = session.user
if not user:
return jsonify(None)
return jsonify(id=user.id, email=user.email, first_name=user.first_name, last_name=user.last_name,
admin=user.is_admin)
@HTTPAPIHook.register
class UserInfoHook(HTTPAPIHook):
TYPES = ('user',)
RE = r'(?P<user_id>[\d]+)'
VALID_FORMATS = ('json', 'jsonp', 'xml')
def _getParams(self):
super()._getParams()
self._user_id = self._pathParams['user_id']
def export_user(self, user):
from indico.modules.users.schemas import UserSchema
if not user:
raise HTTPAPIError('You need to be logged in', 403)
user = User.get(self._user_id, is_deleted=False)
if not user:
raise HTTPAPIError('Requested user not found', 404)
if not user.can_be_modified(user):
raise HTTPAPIError('You do not have access to that info', 403)
return [UserSchema().dump(user)]
|
c3afc7fe9b734691c1f36252e9650569af53c306
|
13ce98780a7e6e7e1412ae91a0fa97a91cf66a73
|
/examples/translations/chinese_test_1.py
|
7b417ce3da9dcc44fcef0fd41bf4d66d3d00f4ad
|
[
"MIT"
] |
permissive
|
seleniumbase/SeleniumBase
|
c607312c0b8f45297088c1283150eb73ea32c553
|
63d95c42fc84bbcea415c6d8a3a201587b89c92e
|
refs/heads/master
| 2023-09-06T05:58:07.923058
| 2023-09-02T14:14:03
| 2023-09-02T14:14:03
| 17,420,614
| 3,656
| 944
|
MIT
| 2023-09-13T21:12:20
| 2014-03-04T23:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
chinese_test_1.py
|
# Chinese Language Test
from seleniumbase.translate.chinese import 硒测试用例
硒测试用例.main(__name__, __file__)
class 我的测试类(硒测试用例):
def test_例子1(self):
self.开启("https://zh.wikipedia.org/wiki/")
self.断言标题("维基百科,自由的百科全书")
self.断言元素('a[title="Wikipedia:关于"]')
self.断言文本("新闻动态", "span#新闻动态")
self.输入文本('input[name="search"]', "舞龍")
self.单击('button:contains("搜索")')
self.断言文本("舞龍", "#firstHeading")
self.断言元素('img[src*="Chinese_draak.jpg"]')
self.回去()
self.输入文本('input[name="search"]', "麻婆豆腐")
self.单击('button:contains("搜索")')
self.断言文本("麻婆豆腐", "#firstHeading")
self.断言元素('figure:contains("一家中餐館的麻婆豆腐")')
self.回去()
self.输入文本('input[name="search"]', "精武英雄")
self.单击('button:contains("搜索")')
self.断言元素('img[src*="Fist_of_legend.jpg"]')
self.断言文本("李连杰", 'li a[title="李连杰"]')
|
465edd34bbf7882132ad86baba5118824723d658
|
02ef81922198f93ea3848d38dac06bf97f481dfa
|
/tests/urls.py
|
c249b021bbd808dacd3825bdab8e67074b0cffb7
|
[
"BSD-3-Clause"
] |
permissive
|
carltongibson/django-filter
|
e409976a37e6fb7bbf4598d0c45ac4fcbb65a36e
|
e5fc05dc00c420ae4dc9390ec6126eb9fc09ae06
|
refs/heads/main
| 2023-09-03T23:54:34.574330
| 2023-08-31T15:33:35
| 2023-08-31T15:45:32
| 117,741
| 3,009
| 651
|
NOASSERTION
| 2023-09-04T16:46:32
| 2009-01-30T00:35:57
|
Python
|
UTF-8
|
Python
| false
| false
| 354
|
py
|
urls.py
|
from django.urls import path
from django_filters.views import FilterView, object_filter
from .models import Book
def _foo():
return "bar"
urlpatterns = [
path(
"books-legacy/",
object_filter,
{"model": Book, "extra_context": {"foo": _foo, "bar": "foo"}},
),
path("books/", FilterView.as_view(model=Book)),
]
|
a05276e7029d2488f44179b85311b9f4aa900045
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/test/nn/conv/test_gps_conv.py
|
a1acd496d360ce682fb338834514904a96c828c8
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
test_gps_conv.py
|
import pytest
import torch
import torch_geometric.typing
from torch_geometric.nn import GPSConv, SAGEConv
from torch_geometric.typing import SparseTensor
from torch_geometric.utils import to_torch_csc_tensor
@pytest.mark.parametrize('attn_type', ['multihead', 'performer'])
@pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm'])
def test_gps_conv(norm, attn_type):
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])
batch = torch.tensor([0, 0, 1, 1])
adj1 = to_torch_csc_tensor(edge_index, size=(4, 4))
conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm,
attn_type=attn_type)
conv.reset_parameters()
assert str(conv) == (f'GPSConv(16, conv=SAGEConv(16, 16, aggr=mean), '
f'heads=4, attn_type={attn_type})')
out = conv(x, edge_index)
assert out.size() == (4, 16)
assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6)
if torch_geometric.typing.WITH_TORCH_SPARSE:
adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4))
assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6)
out = conv(x, edge_index, batch)
assert out.size() == (4, 16)
assert torch.allclose(conv(x, adj1.t(), batch), out, atol=1e-6)
if torch_geometric.typing.WITH_TORCH_SPARSE:
assert torch.allclose(conv(x, adj2.t(), batch), out, atol=1e-6)
|
bca5711b87161e56e677bc93122074d54eddb599
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/tests/test_0003_migrate_lb.py
|
18cb63df669224e1c339299dc847e5895dfe5ad9
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
test_0003_migrate_lb.py
|
# ----------------------------------------------------------------------
# Test migrate-liftbridge
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import pytest
@pytest.mark.usefixtures("database")
def test_migrate_lb(database):
"""
Test migrate-liftbridge
:param database:
:return:
"""
m = __import__("noc.commands.migrate-liftbridge", {}, {}, "Command")
assert m.Command().run_from_argv(["--slots", "1"]) == 0
|
8fecbc735a6a5b44db2216eed9a7972cc6a506f0
|
660c4c4c14b29109a772b00169a0fd50108273fa
|
/tests/test_compute.py
|
76e9d4e11845a1e12662c1c1b79a8523a7348208
|
[
"Apache-2.0"
] |
permissive
|
matrix-profile-foundation/matrixprofile
|
80c3e026c11b39e6431b0e248cdd04f9eb482858
|
6fbd5fe2fd0e93162ef77c4da1b30188072dd404
|
refs/heads/master
| 2022-11-28T13:26:12.289263
| 2022-11-25T13:40:05
| 2022-11-25T13:40:05
| 198,119,545
| 345
| 77
|
Apache-2.0
| 2023-08-17T17:40:45
| 2019-07-22T00:33:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,959
|
py
|
test_compute.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import os
import pytest
import numpy as np
from matrixprofile import compute
import matrixprofile
MODULE_PATH = matrixprofile.__path__[0]
def test_compute_mp_exact_no_query():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
m = 32
profile = compute(ts, windows=m)
assert(profile['algorithm'] == 'mpx')
assert(profile['w'] == 32)
assert(profile['data']['query'] == None)
assert(profile['join'] == False)
assert(profile['sample_pct'] == 1)
assert(profile['class'] == 'MatrixProfile')
def test_compute_mp_exact_with_query():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
query = ts[100:200]
m = 32
profile = compute(ts, windows=m, query=query)
assert(profile['algorithm'] == 'mpx')
assert(profile['w'] == 32)
np.testing.assert_equal(profile['data']['query'], query)
assert(profile['join'] == True)
assert(profile['sample_pct'] == 1)
assert(profile['class'] == 'MatrixProfile')
def test_compute_mp_approximate():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
m = 32
profile = compute(ts, windows=m, sample_pct=0.5)
assert(profile['algorithm'] == 'scrimp++')
assert(profile['w'] == 32)
assert(profile['data']['query'] == None)
assert(profile['join'] == False)
assert(profile['sample_pct'] == 0.5)
assert(profile['class'] == 'MatrixProfile')
def test_compute_pmp_no_sample_pct_windows():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
windows = np.arange(8, 32)
profile = compute(ts, windows=windows)
assert(profile['algorithm'] == 'skimp')
assert(profile['class'] == 'PMP')
assert(profile['sample_pct'] == 1)
np.testing.assert_equal(profile['windows'], windows)
def test_compute_pmp_sample_pct_windows():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
windows = np.arange(8, 32)
profile = compute(ts, windows=windows, sample_pct=1)
assert(profile['algorithm'] == 'skimp')
assert(profile['class'] == 'PMP')
assert(profile['sample_pct'] == 1)
np.testing.assert_equal(profile['windows'], windows)
def test_compute_pmp_no_windows():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
profile = compute(ts)
assert(profile['algorithm'] == 'skimp')
assert(profile['class'] == 'PMP')
    # without explicit windows or sample_pct, sample_pct defaults to 1
assert(profile['sample_pct'] == 1)
def test_compute_pmp_no_windows_sample_pct():
ts = np.loadtxt(os.path.join(MODULE_PATH, '..', 'tests', 'sampledata.txt'))
profile = compute(ts, sample_pct=0.1)
assert(profile['algorithm'] == 'skimp')
assert(profile['class'] == 'PMP')
    # without explicit windows, the provided sample_pct is kept in the profile
assert(profile['sample_pct'] == 0.1)
def test_compute_mp_invalid_windows():
ts = [3., 3., 3., 3., 3., 3., 3., 3.]
with pytest.raises(ValueError) as excinfo:
w = 0
compute(ts, windows=w)
assert 'Compute requires all window sizes to be greater than 3!' \
in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
w = 3
compute(ts, windows=w)
assert 'Compute requires all window sizes to be greater than 3!' \
in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
w = [4, 0]
compute(ts, windows=w)
assert 'Compute requires all window sizes to be greater than 3!' \
in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
w = [4, 3]
compute(ts, windows=w)
assert 'Compute requires all window sizes to be greater than 3!' \
in str(excinfo.value)
def test_preprocess():
ts = np.array([np.nan, np.inf, np.inf, np.nan, np.inf, 2, 3, 2, 3, 1, 2, 3, 4, 2,
np.nan, np.inf, 4, 2, 3, 4, 5, 6, 7, 8, 3, 4, 2, 3, 4, 5, 6, 7, 6,
5, 4, 3, np.nan, np.nan, np.inf, np.nan, np.inf, np.nan])
m = 6
preprocessing_kwargs = {
'window': 5,
'impute_method': 'median',
'impute_direction': 'backward',
'add_noise': False
}
profile = compute(ts, windows=m, preprocessing_kwargs=preprocessing_kwargs)
preprocessed_ts = profile['data']['ts']
assert(np.any(np.isnan(preprocessed_ts)) == False)
assert(np.any(np.isinf(preprocessed_ts)) == False)
# if preprocessing_kwargs=None, we disable the preprocessing procedure.
profile = compute(ts, windows=m, preprocessing_kwargs=None)
unprocessed_ts = profile['data']['ts']
assert(np.any(np.isnan(unprocessed_ts)) == True)
assert(np.any(np.isinf(unprocessed_ts)) == True)
# check if preprocessing_kwargs is None by default.
profile = compute(ts, windows=m)
unprocessed_ts = profile['data']['ts']
assert(np.any(np.isnan(unprocessed_ts)) == True)
assert(np.any(np.isinf(unprocessed_ts)) == True)
with pytest.raises(ValueError) as excinfo:
compute(ts, windows=m, preprocessing_kwargs=1)
assert "The parameter 'preprocessing_kwargs' is not dict like!" \
in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
preprocessing_kwargs = {
'win': 5,
'impute_dir': 'backward',
}
compute(ts, windows=m, preprocessing_kwargs=preprocessing_kwargs)
assert "invalid key(s) for preprocessing_kwargs! valid key(s) should include " \
"{'impute_direction', 'add_noise', 'impute_method', 'window'}" \
in str(excinfo.value)
|
30b969ecae5de9e3cec4beade197ce8e91e5efad
|
9b391863599ecc26a6804f9f272f8c7f2aee8a8c
|
/tick/preprocessing/tests/longitudinal_features_lagger_test.py
|
5aec583b737891920a670445037bdf6ff6d97ed1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
X-DataInitiative/tick
|
4db1bce7471bb48757b54e86b0f7946f36e78dde
|
04dbb377b47783036a8343c6a61b60fc9f430dc3
|
refs/heads/master
| 2023-08-13T10:28:15.560632
| 2023-03-05T00:16:57
| 2023-03-05T00:16:57
| 75,284,069
| 475
| 120
|
BSD-3-Clause
| 2023-03-05T00:16:58
| 2016-12-01T10:59:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
longitudinal_features_lagger_test.py
|
# License: BSD 3 clause
import numpy as np
from scipy.sparse import csr_matrix
import unittest
from tick.preprocessing import LongitudinalFeaturesLagger
class Test(unittest.TestCase):
def setUp(self):
self.features = [
np.array([[0, 1, 0], [0, 0, 0], [0, 1, 1]], dtype="float64"),
np.array([[1, 1, 1], [0, 0, 1], [1, 1, 0]], dtype="float64")
]
self.sparse_features = [csr_matrix(f) for f in self.features]
self.censoring = np.array([2, 3], dtype="uint64")
self.expected_output = [
np.array([[0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.]]),
np.array([[1, 0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1.]])
]
self.n_lags = np.array([1, 2, 1], dtype="uint64")
def test_dense_pre_convolution(self):
feat_prod, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags)\
.fit_transform(self.features, censoring=self.censoring)
np.testing.assert_equal(feat_prod, self.expected_output)
def test_sparse_pre_convolution(self):
feat_prod, _, _ = LongitudinalFeaturesLagger(n_lags=self.n_lags)\
.fit_transform(self.sparse_features, censoring=self.censoring)
feat_prod = [f.todense() for f in feat_prod]
np.testing.assert_equal(feat_prod, self.expected_output)
if __name__ == "__main__":
unittest.main()
|
7bc4afaf174c0fcef48f415dab8b0c6c42c838e2
|
c618bbf2719431999b1007461df0865bab60c883
|
/dali/python/nvidia/dali/_autograph/operators/__init__.py
|
7599ef59f80b86cdb4488385d6f683df96e8ebc4
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DALI
|
3d0d061135d19e092647e6522046b2ff23d4ef03
|
92ebbe5c20e460050abd985acb590e6c27199517
|
refs/heads/main
| 2023-09-04T01:53:59.033608
| 2023-09-01T13:45:03
| 2023-09-01T13:45:03
| 135,768,037
| 4,851
| 648
|
Apache-2.0
| 2023-09-12T18:00:22
| 2018-06-01T22:18:01
|
C++
|
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module implements operators that AutoGraph overloads.
Note that "operator" is used loosely here, and includes control structures like
conditionals and loops, implemented in functional form, using for example
closures for the body.
"""
# Naming conventions:
# * operator names match the name usually used for the respective Python
# idiom; examples: for_stmt, list_append
# * operator arguments match either of:
# - the corresponding Python AST attribute (e.g. the condition of an if
# statement is called test) if the operator represents an AST construct
# - the names used in the Python docs, if the operator is a function (e.g.
# list_ and x for append, see
# https://docs.python.org/3.7/tutorial/datastructures.html)
#
# All operators may accept a final argument named "opts", of a type that
# subclasses namedtuple and contains any arguments that are only required
# for some specializations of the operator.
from nvidia.dali._autograph.operators.conditional_expressions import if_exp
from nvidia.dali._autograph.operators.control_flow import for_stmt
from nvidia.dali._autograph.operators.control_flow import if_stmt
from nvidia.dali._autograph.operators.control_flow import while_stmt
from nvidia.dali._autograph.operators.data_structures import list_append
from nvidia.dali._autograph.operators.data_structures import list_pop
from nvidia.dali._autograph.operators.data_structures import list_stack
from nvidia.dali._autograph.operators.data_structures import ListPopOpts
from nvidia.dali._autograph.operators.data_structures import ListStackOpts
from nvidia.dali._autograph.operators.data_structures import new_list
from nvidia.dali._autograph.operators.exceptions import assert_stmt
from nvidia.dali._autograph.operators.logical import and_
from nvidia.dali._autograph.operators.logical import eq
from nvidia.dali._autograph.operators.logical import not_
from nvidia.dali._autograph.operators.logical import not_eq
from nvidia.dali._autograph.operators.logical import or_
from nvidia.dali._autograph.operators.py_builtins import float_
from nvidia.dali._autograph.operators.py_builtins import int_
from nvidia.dali._autograph.operators.py_builtins import len_
from nvidia.dali._autograph.operators.py_builtins import print_
from nvidia.dali._autograph.operators.py_builtins import range_
from nvidia.dali._autograph.operators.slices import get_item
from nvidia.dali._autograph.operators.slices import GetItemOpts
from nvidia.dali._autograph.operators.slices import set_item
from nvidia.dali._autograph.operators.variables import ld
from nvidia.dali._autograph.operators.variables import ldu
from nvidia.dali._autograph.operators.variables import Undefined
from nvidia.dali._autograph.operators.variables import UndefinedReturnValue
|
93e6ce1908f01a1bc149db9e56bc8f3b2d74cce0
|
636849fc7edd9dcb095cf3410a121ab37de69f02
|
/SoftLayer/CLI/order/package_list.py
|
97e435e840701f522b55012cbd64385df1560ff5
|
[
"MIT"
] |
permissive
|
softlayer/softlayer-python
|
bcb09306c3367fdbd2f1407f770c4959729b074c
|
5798373055d9f34dfd531d81638a64d0a7901a13
|
refs/heads/master
| 2023-08-23T19:32:36.990701
| 2023-08-21T03:29:44
| 2023-08-21T03:29:44
| 622,291
| 126
| 182
|
MIT
| 2023-09-14T15:04:48
| 2010-04-21T20:36:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
package_list.py
|
"""List packages."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI.command import SLCommand as SLCommand
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers import ordering
COLUMNS = ['id',
'name',
'keyName',
'type']
@click.command(cls=SLCommand)
@click.option('--keyword', help="A word (or string) used to filter package names.")
@click.option('--package_type', help="The keyname for the type of package. BARE_METAL_CPU for example")
@environment.pass_env
def cli(env, keyword, package_type):
"""List packages that can be ordered via the placeOrder API.
::
# List out all packages for ordering
slcli order package-list
# List out all packages with "server" in the name
slcli order package-list --keyword server
        # Select only specific package types
slcli order package-list --package_type BARE_METAL_CPU
"""
manager = ordering.OrderingManager(env.client)
table = formatting.Table(COLUMNS)
_filter = {'type': {'keyName': {'operation': '!= BLUEMIX_SERVICE'}}}
if keyword:
_filter['name'] = {'operation': '*= %s' % keyword}
if package_type:
_filter['type'] = {'keyName': {'operation': package_type}}
packages = manager.list_packages(filter=_filter)
for package in packages:
table.add_row([
package['id'],
package['name'],
package['keyName'],
package['type']['keyName']
])
env.fout(table)
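# Illustrative example (not part of the original module): with
# ``--keyword server --package_type BARE_METAL_CPU`` the object filter passed
# to list_packages() becomes
#   {'type': {'keyName': {'operation': 'BARE_METAL_CPU'}},
#    'name': {'operation': '*= server'}}
# because --package_type replaces the default "!= BLUEMIX_SERVICE" type filter.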
|
30b28f5609c4d3a92542f258140f5f6d890649aa
|
b4cfd4949cab5dc5bd27fb028596a9fc02f4e1db
|
/examples/plot_clustering.py
|
e4c968f112aa853876fa9ab12b01caf4520f8c00
|
[
"BSD-3-Clause"
] |
permissive
|
GAA-UAM/scikit-fda
|
dabfd995f2c82efb0d44fa1d2005b2a8ca67442b
|
dfbce35cc9e67d93306dddf0edf4f95aaacd8aff
|
refs/heads/develop
| 2023-08-31T09:11:31.407423
| 2023-08-18T08:19:21
| 2023-08-18T08:19:21
| 96,133,420
| 231
| 55
|
BSD-3-Clause
| 2023-08-18T08:19:22
| 2017-07-03T17:06:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,777
|
py
|
plot_clustering.py
|
"""
Clustering
==========
In this example, the use of the clustering plot methods is shown applied to the
Canadian Weather dataset. K-Means and Fuzzy K-Means algorithms are employed to
calculate the results plotted.
"""
# Author: Amanda Hernando Bernabé
# License: MIT
# sphinx_gallery_thumbnail_number = 6
import matplotlib.pyplot as plt
import numpy as np
from skfda import datasets
from skfda.exploratory.visualization.clustering import (
ClusterMembershipLinesPlot,
ClusterMembershipPlot,
ClusterPlot,
)
from skfda.ml.clustering import FuzzyCMeans, KMeans
##############################################################################
# First, the Canadian Weather dataset is downloaded from the package 'fda' in
# CRAN. It contains an FDataGrid with daily temperatures and precipitations,
# that is, it has a 2-dimensional image. We are interested only in the daily
# average temperatures, so we select the first coordinate function.
X, y = datasets.fetch_weather(return_X_y=True, as_frame=True)
fd = X.iloc[:, 0].values
fd_temperatures = fd.coordinates[0]
target = y.values
# The desired FDataGrid only contains 10 random samples, so that the example
# provides clearer plots.
indices_samples = np.array([1, 3, 5, 10, 14, 17, 21, 25, 27, 30])
fd = fd_temperatures[indices_samples]
##############################################################################
# The data is plotted to show the curves we are working with. They are divided
# according to the target. In this case, it includes the different climates to
# which the weather stations belong.
climates = target[indices_samples].remove_unused_categories()
# Assigning the color to each of the groups.
colormap = plt.cm.get_cmap('tab20b')
n_climates = len(climates.categories)
climate_colors = colormap(np.arange(n_climates) / (n_climates - 1))
fd.plot(group=climates.codes, group_names=climates.categories,
group_colors=climate_colors)
##############################################################################
# The number of clusters is set to the number of climates, in order to see
# the performance of the clustering methods, and the seed is fixed in order
# to always obtain the same result for the example.
n_clusters = n_climates
seed = 2
##############################################################################
# First, the class :class:`~skfda.ml.clustering.KMeans` is instantiated with
# the desired parameters. Its :func:`~skfda.ml.clustering.KMeans.fit` method
# is called, resulting in the calculation of several attributes, including
# the cluster each sample belongs to (labels) and the centroids of each
# cluster. The labels are obtained by calling the method
# :func:`~skfda.ml.clustering.KMeans.predict`.
kmeans = KMeans(n_clusters=n_clusters, random_state=seed)
kmeans.fit(fd)
print(kmeans.predict(fd))
##############################################################################
# To see the information in a graphic way, the method
# :func:`~skfda.exploratory.visualization.clustering_plots.plot_clusters` can
# be used.
# Customization of cluster colors and labels in order to match the first image
# of raw data.
cluster_colors = climate_colors[np.array([0, 2, 1])]
cluster_labels = climates.categories[np.array([0, 2, 1])]
ClusterPlot(kmeans, fd, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
##############################################################################
# Another clustering algorithm implemented is the Fuzzy K-Means, found in the
# class :class:`~skfda.ml.clustering.FuzzyCMeans`. Following the
# above procedure, an object of this type is instantiated with the desired
# data and then the
# :func:`~skfda.ml.clustering.FuzzyCMeans.fit` method is called.
# Internally, the attribute ``membership_degree_`` is calculated, which contains
# ``n_clusters`` elements for each sample and dimension, denoting the degree of
# membership of each sample to each cluster. They are obtained by calling the
# method :func:`~skfda.ml.clustering.FuzzyCMeans.predict_proba`. Also, the
# centroids of each cluster are obtained.
fuzzy_kmeans = FuzzyCMeans(n_clusters=n_clusters, random_state=seed)
fuzzy_kmeans.fit(fd)
print(fuzzy_kmeans.predict_proba(fd))
##############################################################################
# To see the information in a graphic way, the method
# :func:`~skfda.exploratory.visualization.clustering_plots.plot_clusters` can
# be used. It assigns each sample to the cluster whose membership value is the
# greatest.
ClusterPlot(fuzzy_kmeans, fd, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
##############################################################################
# Another plot implemented to show the results in the class
# :class:`~skfda.ml.clustering.FuzzyCMeans` is
# :func:`~skfda.exploratory.visualization.clustering_plots.plot_cluster_lines`
# which is similar to parallel coordinates. It is recommended to assign colors
# to each of the samples in order to identify them. In this example, the
# colors are the ones of the first plot, dividing the samples by climate.
colors_by_climate = colormap(climates.codes / (n_climates - 1))
ClusterMembershipLinesPlot(fuzzy_kmeans, fd, cluster_labels=cluster_labels,
sample_colors=colors_by_climate).plot()
##############################################################################
# Finally, the function
# :func:`~skfda.exploratory.visualization.clustering_plots.plot_cluster_bars`
# returns a barplot. Each sample is designated with a bar which is filled
# proportionally to the membership values with the color of each cluster.
ClusterMembershipPlot(fuzzy_kmeans, fd, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
##############################################################################
# The bars can be sorted according to a given cluster by specifying its
# index, which belongs to the interval [0, n_clusters).
#
# We can order the data using the first cluster:
ClusterMembershipPlot(fuzzy_kmeans, fd, sort=0, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
##############################################################################
# Using the second cluster:
ClusterMembershipPlot(fuzzy_kmeans, fd, sort=1, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
##############################################################################
# And using the third cluster:
ClusterMembershipPlot(fuzzy_kmeans, fd, sort=2, cluster_colors=cluster_colors,
cluster_labels=cluster_labels).plot()
|
824daec0baf2011cebccf5f13a0abb33c7f2f571
|
ed83a8a01473055b6563f0a1122738442f69be0a
|
/world/__init__.py
|
d22fd785803b8651c3e371f1b228f4446fb78e29
|
[] |
no_license
|
cs50/problems
|
528a6a09e533d7dabaebd5d67bd8d7052d35681b
|
96f8dd1c6b8202dcc67f36f06471a178acc4237f
|
refs/heads/2023/x
| 2023-09-03T20:10:55.654530
| 2023-08-30T21:17:54
| 2023-08-30T21:17:54
| 137,074,067
| 128
| 221
| null | 2023-09-04T17:39:14
| 2018-06-12T13:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 369
|
py
|
__init__.py
|
import check50
import check50.c
@check50.check()
def exists():
"""hello.c exists"""
check50.exists("hello.c")
@check50.check(exists)
def compiles():
"""hello.c compiles"""
check50.c.compile("hello.c", lcs50=True)
@check50.check(compiles)
def world():
"""hello.c prints \"hello, world\""""
check50.run("./hello").stdout("hello, world").exit()
|
6c95bcd5062b7d1afe834bff9d816eef58860e85
|
5d4b70ac5e555e3c8b68534ef1790ce041a0f65e
|
/pyunittests/pyluxcoreunittests/tests/scene/testDeleteObjects.py
|
d8f13b519c86558361347383a74b722bf6b66911
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
LuxCoreRender/LuxCore
|
eddb0e3710cbc8fa28cb80f16d908f1ec3cc72db
|
2f35684a04d9e1bd48d6ffa88b19a88871e90942
|
refs/heads/master
| 2023-08-17T01:28:23.931381
| 2023-05-28T22:25:00
| 2023-05-28T22:25:00
| 111,695,279
| 1,055
| 154
|
Apache-2.0
| 2023-08-03T20:21:05
| 2017-11-22T14:36:32
|
C++
|
UTF-8
|
Python
| false
| false
| 3,998
|
py
|
testDeleteObjects.py
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright 1998-2018 by authors (see AUTHORS.txt)
#
# This file is part of LuxCoreRender.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import time
import array
import unittest
import pyluxcore
from pyluxcoreunittests.tests.utils import *
from pyluxcoreunittests.tests.imagetest import *
class TestDeleteObjects(unittest.TestCase):
def CreateConfig(self, type):
# Load the configuration from file
props = pyluxcore.Properties("resources/scenes/simple/simple.cfg")
# Change the render engine to PATHCPU
props.Set(pyluxcore.Property("renderengine.type", ["PATHCPU"]))
props.Set(pyluxcore.Property("sampler.type", ["RANDOM"]))
props.Set(GetDefaultEngineProperties("PATHCPU"))
config = pyluxcore.RenderConfig(props)
scene = config.GetScene()
# Delete the red and green boxes
scene.DeleteObject("box1")
scene.DeleteObject("box2")
# Create the base object
props = pyluxcore.Properties()
if (type == "Normal"):
props.SetFromString("""
scene.objects.box1.ply = resources/scenes/simple/simple-mat-cube1.ply
scene.objects.box1.material = redmatte
""")
elif (type == "Instance"):
props.SetFromString("""
scene.objects.box1.ply = resources/scenes/simple/simple-mat-cube1.ply
scene.objects.box1.material = redmatte
scene.objects.box1.transformation = 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -0.5 0.0 0.0 1.0
""")
elif (type == "Motion"):
props.SetFromString("""
scene.objects.box1.ply = resources/scenes/simple/simple-mat-cube1.ply
scene.objects.box1.material = redmatte
scene.objects.box1.motion.0.time = 0.0
scene.objects.box1.motion.0.transformation = 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -0.25 0.0 0.0 1.0
scene.objects.box1.motion.1.time = 1.0
scene.objects.box1.motion.1.transformation = 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.25 0.0 0.0 1.0
""")
else:
self.assertFalse()
scene.Parse(props)
return config
#---------------------------------------------------------------------------
def DeleteObjects(self, type):
config = self.CreateConfig(type)
scene = config.GetScene()
# Duplicate the base object
mat = [1.0 if i==j else 0.0 for j in range(4) for i in range(4)]
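        # mat is a flattened 4x4 identity transform; elements 12-14 hold the
        # translation (cf. the transformation strings above), so setting
        # mat[0 + 3 * 4] below shifts each duplicate along the x axis.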
#t1 = time.time()
#objCount = 1000000
objCount = 5
objNames = []
for i in range(objCount):
mat[0 + 3 * 4] = 2.5 * (i + 1)
objID = i
scene.DuplicateObject("box1", "box1_dup" + str(i), mat, objID)
objNames += ["box1_dup" + str(i)]
#t2 = time.time()
#logger.info("Elapsed time: " + str(t2 - t1))
# Time for 1,000,000 Normal: 5.675945281982422 secs
# Time for 1,000,000 Instance: 5.323255777359009 secs
# Time for 1,000,000 Motion: 5.41925048828125 secs
#t1 = time.time()
scene.DeleteObjects(objNames)
#t2 = time.time()
#logger.info("Elapsed time: " + str(t2 - t1))
# Time for 1,000,000 Normal (before optimization): 4.250431776046753 secs
# Time for 1,000,000 Normal (after optimization): 0.9761378765106201 secs
# Run the rendering
StandardImageTest(self, "Scene_DeleteObject" + type, config)
def test_Scene_DeleteObjectsNormal(self):
self.DeleteObjects("Normal")
def test_Scene_DeleteObjectsInstance(self):
self.DeleteObjects("Instance")
def test_Scene_DeletesObjectsMotion(self):
self.DeleteObjects("Motion")
|
8d39a4ae8d23995b3e226ee61f19ba9a712795a0
|
d36388e75a43746c415f417e6fc026daa4a52f72
|
/sciluigi/slurm.py
|
85d475de074d6a4e8cde15336d13179f6a2d019f
|
[
"MIT"
] |
permissive
|
pharmbio/sciluigi
|
bfab10161cd476813c583c559c61e4123df7e11b
|
11daa90eb49b97e71a02e8bf4965feed33df05b3
|
refs/heads/master
| 2023-06-07T09:48:40.188139
| 2023-01-07T22:35:52
| 2023-01-07T22:35:52
| 35,960,208
| 309
| 59
|
MIT
| 2023-01-06T22:05:29
| 2015-05-20T16:31:45
|
Python
|
UTF-8
|
Python
| false
| false
| 8,310
|
py
|
slurm.py
|
'''
This module contains functionality related to integration with the SLURM HPC
resource manager.
'''
import datetime
import logging
import re
import time
import sciluigi.parameter
import sciluigi.task
import subprocess as sub
# ================================================================================
# Setup logging
log = logging.getLogger('sciluigi-interface')
# A few 'constants'
RUNMODE_LOCAL = 'runmode_local'
RUNMODE_HPC = 'runmode_hpc'
RUNMODE_MPI = 'runmode_mpi'
# ================================================================================
class SlurmInfo():
'''
A data object for keeping slurm run parameters.
'''
runmode = None # One of RUNMODE_LOCAL|RUNMODE_HPC|RUNMODE_MPI
project = None
partition = None
cores = None
time = None
jobname = None
threads = None
def __init__(self, runmode, project, partition, cores, time, jobname, threads):
'''
Init a SlurmInfo object, from string data.
Time is on format: [[[d-]HH:]MM:]SS
'''
self.runmode = runmode
self.project = project
self.partition = partition
self.cores = cores
self.time = time
self.jobname = jobname
self.threads = threads
def __str__(self):
'''
Return a readable string representation of the info stored
'''
strrepr = ('(time: {t}, '
'partition: {pt}, '
'cores: {c}, '
'threads: {thr}, '
'jobname: {j}, '
'project: {pr})').format(
t=self.time,
pt=self.partition,
c=self.cores,
thr=self.threads,
j=self.jobname,
pr=self.project)
return strrepr
def get_argstr_hpc(self):
'''
Return a formatted string with arguments and option flags to SLURM
commands such as salloc and sbatch, for non-MPI, HPC jobs.
'''
argstr = ' -A {pr} -p {pt} -n {c} -t {t} -J {j} srun -n 1 -c {thr} '.format(
pr=self.project,
pt=self.partition,
c=self.cores,
t=self.time,
j=self.jobname,
thr=self.threads)
return argstr
def get_argstr_mpi(self):
'''
Return a formatted string with arguments and option flags to SLURM
commands such as salloc and sbatch, for MPI jobs.
'''
argstr = ' -A {pr} -p {pt} -n {c1} -t {t} -J {j} mpirun -v -np {c2} '.format(
pr=self.project,
pt=self.partition,
c1=self.cores,
t=self.time,
j=self.jobname,
c2=self.cores)
return argstr
# ================================================================================
class SlurmInfoParameter(sciluigi.parameter.Parameter):
'''
A specialized luigi parameter, taking SlurmInfo objects.
'''
def parse(self, x):
if isinstance(x, SlurmInfo):
return x
else:
raise Exception('Parameter is not instance of SlurmInfo: %s' % x)
# ================================================================================
class SlurmHelpers():
'''
Mixin with various convenience methods for executing jobs via SLURM
'''
# Other class-fields
slurminfo = SlurmInfoParameter(default=None) # Class: SlurmInfo
# Main Execution methods
def ex(self, command):
'''
Execute either locally or via SLURM, depending on config
'''
if isinstance(command, list):
command = ' '.join(command)
if self.slurminfo.runmode == RUNMODE_LOCAL:
log.info('Executing command in local mode: %s', command)
self.ex_local(command) # Defined in task.py
elif self.slurminfo.runmode == RUNMODE_HPC:
log.info('Executing command in HPC mode: %s', command)
self.ex_hpc(command)
elif self.slurminfo.runmode == RUNMODE_MPI:
log.info('Executing command in MPI mode: %s', command)
self.ex_mpi(command)
def ex_hpc(self, command):
'''
Execute command in HPC mode
'''
if isinstance(command, list):
command = sub.list2cmdline(command)
fullcommand = 'salloc %s %s' % (self.slurminfo.get_argstr_hpc(), command)
(retcode, stdout, stderr) = self.ex_local(fullcommand)
self.log_slurm_info(stderr)
return (retcode, stdout, stderr)
def ex_mpi(self, command):
'''
Execute command in HPC mode with MPI support (multi-node, message passing interface).
'''
if isinstance(command, list):
command = sub.list2cmdline(command)
fullcommand = 'salloc %s %s' % (self.slurminfo.get_argstr_mpi(), command)
(retcode, stdout, stderr) = self.ex_local(fullcommand)
self.log_slurm_info(stderr)
return (retcode, stdout, stderr)
# Various convenience methods
def assert_matches_character_class(self, char_class, a_string):
'''
Helper method, that tests whether a string matches a regex character class.
'''
if not bool(re.match('^{c}+$'.format(c=char_class), a_string)):
raise Exception('String {s} does not match character class {cc}'.format(
s=a_string, cc=char_class))
def clean_filename(self, filename):
'''
Clean up a string to make it suitable for use as file name.
'''
return re.sub('[^A-Za-z0-9\_\ ]', '_', str(filename)).replace(' ', '_')
#def get_task_config(self, name):
# return luigi.configuration.get_config().get(self.task_family, name)
def log_slurm_info(self, slurm_stderr):
'''
Parse information of the following example form:
salloc: Granted job allocation 5836263
srun: Job step created
salloc: Relinquishing job allocation 5836263
salloc: Job allocation 5836263 has been revoked.
'''
matches = re.search('[0-9]+', str(slurm_stderr))
if matches:
jobid = matches.group(0)
# Write slurm execution time to audit log
cmd = 'sacct -j {jobid} --noheader --format=elapsed'.format(jobid=jobid)
(_, jobinfo_stdout, _) = self.ex_local(cmd)
sacct_matches = re.findall('([0-9\:\-]+)', str(jobinfo_stdout))
if len(sacct_matches) < 2:
log.warn('Not enough matches from sacct for task %s: %s',
self.instance_name, ', '.join(['Match: %s' % m for m in sacct_matches])
)
else:
slurm_exectime_fmted = sacct_matches[1]
# Date format needs to be handled differently if the days field is included
if '-' in slurm_exectime_fmted:
tobj = time.strptime(slurm_exectime_fmted, '%d-%H:%M:%S')
self.slurm_exectime_sec = int(datetime.timedelta(
tobj.tm_mday,
tobj.tm_sec,
0,
0,
tobj.tm_min,
tobj.tm_hour).total_seconds())
else:
tobj = time.strptime(slurm_exectime_fmted, '%H:%M:%S')
self.slurm_exectime_sec = int(datetime.timedelta(
0,
tobj.tm_sec,
0,
0,
tobj.tm_min,
tobj.tm_hour).total_seconds())
log.info('Slurm execution time for task %s was %ss',
self.instance_name,
self.slurm_exectime_sec)
self.add_auditinfo('slurm_exectime_sec', int(self.slurm_exectime_sec))
# Write this last, so as to get the main task exectime and slurm exectime together in
# audit log later
self.add_auditinfo('slurm_jobid', jobid)
# ================================================================================
class SlurmTask(SlurmHelpers, sciluigi.task.Task):
'''
luigi task that includes the SlurmHelpers mixin.
'''
pass
|
002ebfc9eacf0a532bceb2791277cc8ed820ca06
|
bd3a1843e2b0dc15837628c77f73e95a9bb1264f
|
/tests/dataset/test_public_suffix.py
|
297ff64e1dc5ee680aede2efca9bf3ea6a487206
|
[
"Apache-2.0"
] |
permissive
|
funilrys/PyFunceble
|
404c64d1b281d4ae06a939b54f4088d63e12b828
|
214a57d0eca3df7c4ed3421937aaff9998452ba6
|
refs/heads/dev
| 2023-06-24T18:39:29.372775
| 2023-06-18T13:15:39
| 2023-06-18T13:15:39
| 106,995,518
| 267
| 62
|
Apache-2.0
| 2023-08-25T16:05:17
| 2017-10-15T08:25:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,051
|
py
|
test_public_suffix.py
|
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of the public suffix dataset interaction.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import json
import tempfile
import unittest
import unittest.mock
from PyFunceble.dataset.base import DatasetBase
from PyFunceble.dataset.public_suffix import PublicSuffixDataset
class TestPublicSuffixDataset(unittest.TestCase):
"""
Tests the public suffix dataset interaction.
"""
def setUp(self) -> None:
"""
Sets up everything needed by the tests.
"""
self.tempfile = tempfile.NamedTemporaryFile()
self.our_dataset = {
"ac": ["com.ac", "edu.ac", "gov.ac", "mil.ac", "net.ac", "org.ac"],
"academy": ["official.academy"],
"ad": ["nom.ad"],
}
self.tempfile.write(json.dumps(self.our_dataset).encode())
self.tempfile.seek(0)
self.ps_dataset = PublicSuffixDataset()
self.ps_dataset.source_file = self.tempfile.name
self.get_content_patch = unittest.mock.patch.object(DatasetBase, "get_content")
self.mock_get_content = self.get_content_patch.start()
self.mock_get_content.return_value = copy.deepcopy(self.our_dataset)
def tearDown(self) -> None:
"""
Destroys everything needed by the tests.
"""
del self.tempfile
del self.our_dataset
del self.ps_dataset
self.get_content_patch.stop()
del self.get_content_patch
def test_contains(self) -> None:
"""
Tests the method which lets us check if a given extension is in the
dataset.
"""
given = "ac"
expected = True
actual = given in self.ps_dataset
self.assertEqual(expected, actual)
def test_contains_extension_starts_with_point(self) -> None:
"""
Tests the method which lets us check if a given extension is in the
dataset for the case where the extension starts with a point.
"""
given = ".ac"
expected = True
actual = given in self.ps_dataset
self.assertEqual(expected, actual)
def test_does_not_contain(self) -> None:
"""
Tests the method which lets us check if a given extension is in the
dataset.
"""
given = "com"
expected = False
actual = given in self.ps_dataset
self.assertEqual(expected, actual)
def test_getitem(self) -> None:
"""
Tests the method which lets us (indirectly) get the available subdataset.
"""
expected = copy.deepcopy(self.our_dataset["ad"])
actual = self.ps_dataset["ad"]
self.assertEqual(expected, actual)
def test_getitem_extension_starts_with_point(self) -> None:
"""
Tests the method which lets us (indirectly) get the available subdataset.
"""
expected = copy.deepcopy(self.our_dataset["ad"])
actual = self.ps_dataset[".ad"]
self.assertEqual(expected, actual)
def test_getitem_not_exist(self) -> None:
"""
Tests the method which lets us (indirectly) get the available subdataset
for the case where the given extension does not exist.
"""
expected = list() # pylint: disable=use-list-literal
actual = self.ps_dataset["hehehe"]
self.assertEqual(expected, actual)
def test_getattr(self) -> None:
"""
Tests the method which lets us (indirectly) get the available subdataset.
"""
expected = copy.deepcopy(self.our_dataset["ad"])
actual = self.ps_dataset.ad
self.assertEqual(expected, actual)
def test_getattr_not_exist(self) -> None:
"""
Tests the method which lets us (indirectly) get the available subdataset
for the case where the given extension does not exist.
"""
expected = list() # pylint: disable=use-list-literal
actual = self.ps_dataset.hehehe
self.assertEqual(expected, actual)
def test_is_extension(self) -> None:
"""
Tests the method which lets us check if the given extension is known.
"""
expected = True
actual = self.ps_dataset.is_extension("ad")
self.assertEqual(expected, actual)
def test_is_not_extension(self) -> None:
"""
Tests the method which lets us check if the given extension is known for
the case where the given extension does not exist.
"""
expected = False
actual = self.ps_dataset.is_extension("heheh")
self.assertEqual(expected, actual)
def test_is_extension_not_str(self) -> None:
"""
Tests the method which lets us check if the given extension is known for
the case where the given extension is not a string.
"""
self.assertRaises(TypeError, lambda: self.ps_dataset.is_extension(["test"]))
def test_get_available_suffix(self) -> None:
"""
Tests the method which lets us get the available suffixes of a
given extension.
"""
expected = copy.deepcopy(self.our_dataset["ad"])
actual = self.ps_dataset.get_available_suffix("ad")
self.assertEqual(expected, actual)
def test_get_available_suffix_not_extension(self) -> None:
"""
Tests the method which lets us get the available suffixes of a given
extension for the case where the given extension is not known.
"""
expected = list() # pylint: disable=use-list-literal
actual = self.ps_dataset.get_available_suffix("hehehehehe")
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
|
129611b947626af8ec17955752fffa6ac91743f3
|
f2d4c0eac2b12a64c499b533f3fe0883262f6293
|
/examples/hyperlink.py
|
db4c6f4c2d384837d643aeef3a86bf921fbde2da
|
[
"MIT"
] |
permissive
|
mozman/svgwrite
|
716ef150734b42f42474fdaecc11eb646ac29bab
|
cd10a7ed1982de77ba85ff4556e154187d7f14bc
|
refs/heads/master
| 2022-08-10T03:19:14.453560
| 2022-07-14T13:59:39
| 2022-07-14T13:59:39
| 79,704,670
| 512
| 109
|
NOASSERTION
| 2022-07-14T13:34:27
| 2017-01-22T08:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
hyperlink.py
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: svg hyperlink examples
# Created: 05.05.2017
# Copyright (C) 2017, Manfred Moitzi
# License: MIT License
try:
import svgwrite
except ImportError:
# if svgwrite is not 'installed' append parent dir of __file__ to sys.path
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
import svgwrite
def hyperlink(name):
dwg = svgwrite.Drawing(name, (200, 200), debug=True)
# use the hyperlink element
link = dwg.add(dwg.a('http://www.w3.org'))
link.add(dwg.ellipse(center=(100, 50), r=(50, 25), fill='red'))
dwg.save(pretty=True)
if __name__ == '__main__':
hyperlink("hyperlink.svg")
|
6cfea640a0e75534d208bca5d08d7695122c766a
|
1f20484efc357aae4b7e2f98a191e7a9256f3a58
|
/irc3/plugins/search.py
|
e0e64ce20f6f14d8502db1b725b5bd6e77b23cf6
|
[
"CC-BY-3.0",
"MIT",
"LicenseRef-scancode-ietf"
] |
permissive
|
gawel/irc3
|
55b2e4d01ca95f45077f8bad231394551584d7bd
|
76d6849d5e7a531d649aca766f623f9f30a55545
|
refs/heads/master
| 2023-07-15T20:49:40.188267
| 2023-04-17T09:02:31
| 2023-04-17T09:02:31
| 14,820,406
| 187
| 58
|
MIT
| 2023-02-27T10:18:41
| 2013-11-30T12:09:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
search.py
|
# -*- coding: utf-8 -*-
from irc3.plugins.command import command
import irc3
__doc__ = '''
==============================================
:mod:`irc3.plugins.search` Search plugin
==============================================
.. autoclass:: Search
'''
@irc3.plugin
class Search:
requires = [
__name__.replace('search', 'command'),
]
headers = {
'User-Agent': 'python-requests/irc3/search',
'Cache-Control': 'max-age=0',
'Pragma': 'no-cache',
}
def __init__(self, bot):
self.bot = bot
try:
import requests
self.session = requests.Session()
self.session.headers.update(self.headers)
except ImportError: # pragma: no cover
self.session = None
@command(permission='view')
def ddg(self, mask, target, args):
"""Search using https://duckduckgo.com/api
%%ddg <query>...
"""
q = ' '.join(args['<query>'])
resp = self.session.get('http://api.duckduckgo.com/',
params=dict(q=q, format='json', t='irc3'),
allow_redirects=False)
ctype = resp.headers['content-type']
if 'json' in ctype or 'javascript' in ctype:
if resp.status_code == 200:
data = resp.json()
return '{AbstractText} - {AbstractURL}'.format(**data)
elif resp.status_code == 303:
return 'Redirect to: {0}'.format(resp.headers['location'])
|
45106de6277c1dcc4430f1b2f914fcbea32cee3e
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/webdriver/tests/classic/perform_actions/pointer_tripleclick.py
|
b45709da3cb06bb037ecb8b18a92ce325a5380dd
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
pointer_tripleclick.py
|
import math
from tests.classic.perform_actions.support.refine import get_events
from tests.support.asserts import assert_move_to_coordinates
from tests.support.helpers import filter_dict
lots_of_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "\
"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "\
"exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."
def test_tripleclick_at_coordinates(session, mouse_chain, inline):
"""
This test does a triple click on a coordinate. On desktop platforms
this will select a paragraph. On mobile this will not have the same
desired outcome as taps are handled differently on mobile.
"""
session.url = inline("""<div>
{}
</div>""".format(lots_of_text))
div = session.find.css("div", all=False)
div_rect = div.rect
div_centre = {
"x": math.floor(div_rect["x"] + div_rect["width"] / 2),
"y": math.floor(div_rect["y"] + div_rect["height"] / 2)
}
mouse_chain \
.pointer_move(div_centre["x"], div_centre["y"]) \
.click() \
.click() \
.click() \
.perform()
actual_text = session.execute_script("return document.getSelection().toString();")
assert actual_text == lots_of_text
|
46966ca8a9ad42a89412aa48b90d56a95f2efcbb
|
48cd6a93fe538693fec65aaa81306e6b69b642ad
|
/dask/dataframe/tests/test_optimize_dataframe.py
|
7443d2a454b7bc8042145865b53014c15c36a4fc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dask/dask
|
0138cc2fb9aad27287643fe5ee240b8b09f2300d
|
18098d35298bad21c878c339d73de784612566c7
|
refs/heads/main
| 2023-09-04T02:39:37.886054
| 2023-09-01T19:02:00
| 2023-09-01T19:02:00
| 28,782,747
| 11,423
| 2,116
|
BSD-3-Clause
| 2023-09-14T17:36:04
| 2015-01-04T18:50:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
test_optimize_dataframe.py
|
from __future__ import annotations
import pandas as pd
import dask
import dask.dataframe as dd
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
dfs = list(dsk.values())
def test_fuse_ave_width():
df = pd.DataFrame({"x": range(10)})
df = dd.from_pandas(df, npartitions=5)
s = (df.x + 1) + (df.x + 2)
with dask.config.set({"optimization.fuse.ave-width": 4}):
a = s.__dask_optimize__(s.dask, s.__dask_keys__())
b = s.__dask_optimize__(s.dask, s.__dask_keys__())
assert len(a) <= 15
assert len(b) <= 15
def test_optimize_blockwise():
from dask.array.optimization import optimize_blockwise
df = pd.DataFrame({"x": range(10), "y": range(10)})
ddf = dd.from_pandas(df, npartitions=2)
for _ in range(10):
ddf["x"] = ddf.x + 1 + ddf.y
graph = optimize_blockwise(ddf.dask)
assert len(graph) <= 4
|
aaa2aa641c238365a9aa5f850ea1ac2149e0058e
|
a156285f264c2d327075f5d8a57f909e17d901ba
|
/tools/sample_idxs_to_text.py
|
ff35e19f51004a9a79385b96376a6592206cc190
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bigscience-workshop/Megatron-DeepSpeed
|
2f0d53b047af3e05a3ffa277184a37dd13022308
|
e52bdabbde3c6895aceb76c1bced295c2646121f
|
refs/heads/main
| 2023-08-04T06:31:58.462403
| 2023-02-21T16:56:28
| 2023-02-21T16:56:28
| 382,420,841
| 979
| 199
|
NOASSERTION
| 2023-07-25T07:14:20
| 2021-07-02T17:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 6,363
|
py
|
sample_idxs_to_text.py
|
"""
A script which prints the training data according to the given sample index.
Note that it's crucial that exactly the same corresponding arguments are passed as in the training script, including the seed. Only then will the random sequence from the Megatron-generated .npy files match.
Here is how to decipher the index file name:
meg-gpt2_oscar-combined_text_document_train_indexmap_100ns_1024sl_42s_sample_idx.npy
100ns = --train-samples 100
1024sl = --seq-length 1024
42s = --seed 42
So these 3 have to match the training to get the correct output from this script.
If you're working on the same machine that already has the indices generated during the training, you can also do a sanity check that it doesn't generate new .npy files for the 3 train .npy files (but it will still do it for 3 valid and 3 test .npy files since we feed it a hardcoded setup of size 0 for both valid and test datasets.)
`--sample-id-range` is for consumed samples, so if the gap of interest is between these 2 iterations:
iteration 3848/ 159576 | consumed samples: 75888 | elapsed time per iteration (ms): 14308.9 | learning rate: 2.102E-05 | global batch size: 32 | lm loss: 6.452862E+00 | loss scale: 32768.0 | grad norm: 262044.694 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
iteration 3792/ 159576 | consumed samples: 74096 | elapsed time per iteration (ms): 16474.9 | learning rate: 2.052E-05 | global batch size: 32 | lm loss: 6.404737E+00 | loss scale: 32768.0 | grad norm: 214321.235 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
You'd then use:
`--sample-id-range 75888 74096`
the larger the batch size, the larger the number of samples will be.
Below is an example bash script to print the data in sample index range 5-15:
```
source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/
cd $MEGATRON_DEEPSPEED_REPO
VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
SEQ_LEN=2048
python tools/sample_idxs_to_text.py \
--print-text \
--sample-id-range 5 15 \
--seed 42 \
--train-samples 100 \
--seq-length $SEQ_LEN \
--data-path $DATA_PATH \
--data-impl mmap \
--tokenizer-type GPT2BPETokenizer \
--vocab-file $VOCAB_FILE \
--merge-file $MERGE_FILE
```
If you want tokens instead of text, remove `--print-text` and add `--print-tokens` (but you can have both). If you want full token dumps add `--all-tokens`
If you want the data written to a file add:
--output-file output.txt
This script can be extended to support valid and test datasets as well, but currently ignores those.
Again, the key 3 args to get right are:
--seed 42 \
--train-samples 100 \
--seq-length $SEQ_LEN \
"""
import sys
import torch
from megatron import get_args
from megatron import get_tokenizer
from megatron import initialize_megatron
from megatron.data.data_samplers import build_pretraining_data_loader
from megatron.data.gpt_dataset import build_train_valid_test_datasets
from megatron.training import update_train_iters
def _add_network_size_args(parser):
group = parser.add_argument_group(title='Get text from sample idxs.')
group.add_argument('--sample-id-range', type=int, nargs='+', required=True,
help='The number of samples consumed. ex) --sample-id-range 1024 2048')
group.add_argument('--all-tokens', action='store_true', help='Whether to dump all tokens per record')
group.add_argument('--print-tokens', action='store_true', help='Whether to print tokens')
group.add_argument('--print-text', action='store_true', help='Whether to print text')
group.add_argument('--output-file', help='path to file if the dump should be saved into a file')
return parser
if __name__ == "__main__":
# megatron requires args which are irrelevant to the task at hand, but without which it won't start.
# Therefore, prefill those here and don't require the user to enter them.
required_irrelevant_args = """
--num-layers 1
--hidden-size 1
--num-attention-heads 1
--max-position-embeddings 1000000
--eval-interval 1
--eval-iters 1
--micro-batch-size 1
--global-batch-size 1
""".split()
sys.argv.extend(required_irrelevant_args)
initialize_megatron(extra_args_provider=_add_network_size_args)
args = get_args()
tokenizer = get_tokenizer()
update_train_iters(args)
if not (args.print_tokens or args.print_text):
raise ValueError("Need to specify either --print_tokens or --print_text or both")
if args.all_tokens and not args.print_tokens:
raise ValueError("--all_tokens requires --print_tokens")
train_ds, _, _ = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=[args.train_samples, 0, 0],
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup)
)
# fast forward to where we want to start sampling
train_dataloader = build_pretraining_data_loader(train_ds, args.sample_id_range[0])
data_iterator = iter(train_dataloader)
if args.all_tokens:
torch.set_printoptions(threshold=2**20)
if args.output_file is not None:
print(f"*** Saving to {args.output_file}")
fh = open(args.output_file, "w")
else:
print(f"*** Dumping to stdout")
def write(msg):
if args.output_file:
fh.write(msg+"\n")
else:
print(msg)
for i in range(args.sample_id_range[0], args.sample_id_range[1]):
tokens = next(data_iterator)["text"][0]
if args.print_tokens:
write(f"{i} {tokens}")
if args.print_text:
trim_decode_tokens = tokenizer.detokenize(tokens.tolist())
write(f"{i} {trim_decode_tokens}")
if args.output_file is not None:
print(f"*** Output saved in {args.output_file}")
fh.close()
print(f"*** {args.sample_id_range[1]-args.sample_id_range[0]} records dumped")
|
2fd16d2cb82a247e1551cf5e2960afd4cdddf4d4
|
309205c535375ffc82b4f59d56e5594d208c9a80
|
/pythonbrasil-14/mas_da_pra_abrir_no_excel_exportar_para_txt_csv_json/demos_slides/03_exporta_csv.py
|
3237b24c02713c910cfcf60971cfbb8a1518d48d
|
[] |
no_license
|
pythonbrasil/talks
|
e8209d36681900e645ddc994f3ff318309c56b58
|
b0bd9f1a98e63bc0b1863ff11485fbfbfbeb32a9
|
refs/heads/master
| 2022-11-05T08:47:37.750164
| 2022-10-28T18:52:31
| 2022-10-28T18:52:31
| 71,194,771
| 140
| 59
| null | 2022-10-28T18:52:33
| 2016-10-18T01:04:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
03_exporta_csv.py
|
import csv
locations = [('PUNE', '20th October 2018'),
('Sao Leopoldo', '20th October 2018'),
('Oko , Anambra', '3rd November 2018'),
('Abeokuta, Ogun', '3rd November 2018')]
filename = 'django_girls.csv'
with open(filename, 'w', newline='') as csvfile:
write_location = csv.writer(csvfile, delimiter=';', dialect='excel')
for city in locations:
write_location.writerow(city)
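# --- Illustrative follow-up (not part of the original talk demo) ----------------
# Reading the exported file back: the same delimiter must be passed to
# csv.reader, otherwise each row comes back as a single unsplit field.
with open(filename, newline='') as csvfile:
    read_location = csv.reader(csvfile, delimiter=';', dialect='excel')
    for row in read_location:
        print(row)  # e.g. ['PUNE', '20th October 2018']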
|
ebfa169c921d11d76eb9f6c0d691abcaffe2d434
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/miniclient/event.py
|
30237320472432c0665a7453fc3662334a8bc362
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
event.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/miniclient/event.py
from helpers import aop
class _ParametrizeInitAspect(aop.Aspect):
def atCall(self, cd):
cd.avoid()
return False
class _DisableEventBoards(aop.Aspect):
def atCall(self, cd):
cd.avoid()
return False
class InitEventPointcut(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.game_control.event_battles_controller', 'EventBattlesController', 'isEnabled', aspects=(_ParametrizeInitAspect,))
class DisableEventBoards(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'helpers.server_settings', 'ServerSettings', 'isElenEnabled', aspects=(_DisableEventBoards,))
|
4dba1126c5912e6ffe0fffe864fe0a656a09286b
|
0948f5944bcb95af55ac258d6104044ddbedab6b
|
/extras/widgets/calendar.py
|
ae6141139b946c414ed4320d6f231a067d2205bb
|
[
"MIT"
] |
permissive
|
peterhinch/micropython-nano-gui
|
e9b7ca20535bbb52c695083deb28721074cfa71e
|
5eef93317e83bc767da88fba8acdfc2a167db794
|
refs/heads/master
| 2023-06-22T09:27:18.739604
| 2023-06-12T13:43:47
| 2023-06-12T13:43:47
| 146,632,615
| 360
| 78
|
MIT
| 2023-09-02T09:08:16
| 2018-08-29T17:07:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
calendar.py
|
# calendar.py Calendar object
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2023 Peter Hinch
from extras.widgets.grid import Grid
from gui.widgets.label import Label, ALIGN_CENTER
from extras.date import DateCal
class Calendar:
def __init__(
self, wri, row, col, colwidth, fgcolor, bgcolor, today_c, cur_c, sun_c, today_inv=False, cur_inv=False
):
self.fgcolor = fgcolor
self.bgcolor = bgcolor
self.today_c = today_c # Color of "today" cell
self.today_inv = today_inv
self.cur_c = cur_c # Calendar currency
self.cur_inv = cur_inv
self.sun_c = sun_c # Sundays
self.date = DateCal()
self.date.callback = self.show
rows = 6
cols = 7
self.ncells = cols * (rows - 1) # Row 0 has day labels
self.last_cell = cols * rows
lw = (colwidth + 4) * cols # Label width = width of grid
kwargs = {"align": ALIGN_CENTER, "fgcolor": fgcolor, "bgcolor": bgcolor}
self.lbl = Label(wri, row, col, lw, **kwargs)
row += self.lbl.height + 3 # Two border widths
self.grid = Grid(wri, row, col, colwidth, rows, cols, **kwargs)
self.grid.show() # Draw grid lines
self.grid[0, 0:7] = iter([d[:3] for d in DateCal.days]) # 3-char day names
self.show()
def days(self, month_length): # Produce content for every cell
for n in range(self.ncells + 1):
yield str(n + 1) if n < month_length else ""
def show(self):
grid = self.grid
cur = self.date # Currency
self.lbl.value(f"{DateCal.months[cur.month - 1]} {cur.year}")
values = self.days(cur.month_length) # Instantiate generator
idx_1 = 7 + cur.wday_n(1) # Index of 1st of month
grid[idx_1 : self.last_cell] = values
grid[7 : idx_1] = values
# Assign colors. Last to be applied has priority.
grid[1:6, 6] = {"fgcolor": self.sun_c} # Sunday color
idx_cur = idx_1 + cur.mday - 1 # Currency (overrides Sunday)
if self.cur_inv:
grid[idx_cur] = {"fgcolor": self.bgcolor, "bgcolor": self.cur_c}
else:
grid[idx_cur] = {"fgcolor": self.cur_c}
today = DateCal()
if cur.year == today.year and cur.month == today.month: # Today is in current month
idx = idx_1 + today.mday - 1
if self.today_inv:
grid[idx] = {"fgcolor": self.bgcolor, "bgcolor": self.fgcolor}
else:
grid[idx] = {"fgcolor": self.today_c}
|
61cac04dfb8cbaf2d06e62a1587fb94e1e7001da
|
12ac38b4146253c5778d0e53eb4128dce2ae1860
|
/doc/examples/creating_data_type_packages.py
|
e5f3686eec49488dbccdae61e325e4a589cdb86d
|
[
"MIT"
] |
permissive
|
cogu/autosar
|
a75e12186877b4ace807ab5e14daa89c6fb42935
|
5db06cc46f27c1ef8edec92f6883850cfd9e61f1
|
refs/heads/master
| 2023-08-30T19:59:59.277984
| 2023-05-08T19:10:21
| 2023-05-08T19:10:21
| 63,766,974
| 291
| 152
|
MIT
| 2022-03-27T18:30:55
| 2016-07-20T09:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
creating_data_type_packages.py
|
import autosar
ws = autosar.workspace(version="4.2.2")
package=ws.createPackage('DataTypes')
baseTypes = package.createSubPackage('BaseTypes')
BaseTypeUint8 = baseTypes.createSwBaseType('uint8', 8, nativeDeclaration='uint8')
implTypes = package.createSubPackage('ImplementationTypes', role='DataType')
implTypes.createSubPackage('CompuMethods', role='CompuMethod')
implTypes.createSubPackage('DataConstrs', role='DataConstraint')
implTypes.createImplementationDataType('uint8', BaseTypeUint8.ref, 0, 255)
ws.saveXML('DataTypes.arxml')
|
cb54b66c397780cc31eaba2082eeced706c66770
|
15b3470cfed1d6363096b7dc4abfd12d4337ddff
|
/parsivar/normalizer.py
|
7ccd021af695e7360e1c7372adbe044e00f1c6f6
|
[
"MIT"
] |
permissive
|
ICTRC/Parsivar
|
82071d0e8a81963986055de477e78276e5048329
|
aad5da1ba08cbca7f1bd0ef2f3a13c3d515651b5
|
refs/heads/master
| 2023-03-07T13:40:39.195414
| 2023-03-05T13:21:13
| 2023-03-05T13:21:13
| 121,358,179
| 216
| 38
|
MIT
| 2023-03-05T13:21:14
| 2018-02-13T08:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 25,365
|
py
|
normalizer.py
|
from re import sub
import copy
import os
from .tokenizer import Tokenizer
from .data_helper import DataHelper
from .token_merger import ClassifierChunkParser
class Normalizer():
def __init__(self,
half_space_char='\u200c',
statistical_space_correction=False,
date_normalizing_needed=False,
pinglish_conversion_needed=False,
train_file_path="resource/tokenizer/Bijan_khan_chunk.txt",
token_merger_path="resource/tokenizer/TokenMerger.pckl"):
self.dir_path = os.path.dirname(os.path.realpath(__file__)) + "/"
self.dic1_path = self.dir_path + 'resource/normalizer/Dic1_new.txt'
self.dic2_path = self.dir_path + 'resource/normalizer/Dic2_new.txt'
self.dic3_path = self.dir_path + 'resource/normalizer/Dic3_new.txt'
self.dic1 = self.load_dictionary(self.dic1_path)
self.dic2 = self.load_dictionary(self.dic2_path)
self.dic3 = self.load_dictionary(self.dic3_path)
self.statistical_space_correction = statistical_space_correction
self.date_normalizing_needed = date_normalizing_needed
self.pinglish_conversion_needed = pinglish_conversion_needed
self.data_helper = DataHelper()
self.token_merger = ClassifierChunkParser()
if self.date_normalizing_needed or self.pinglish_conversion_needed:
self.tokenizer = Tokenizer()
self.date_normalizer = DateNormalizer()
self.pinglish_conversion = PinglishNormalizer()
if self.statistical_space_correction:
self.token_merger_path = self.dir_path + token_merger_path
self.train_file_path = train_file_path
self.half_space_char = half_space_char
if os.path.isfile(self.token_merger_path):
self.token_merger_model = self.data_helper.load_var(self.token_merger_path)
elif os.path.isfile(self.train_file_path):
self.token_merger_model = self.token_merger.train_merger(self.train_file_path, test_split=0)
self.data_helper.save_var(self.token_merger_path, self.token_merger_model)
def load_dictionary(self, file_path):
dict = {}
with open(file_path, 'r', encoding='utf-8') as f:
g = f.readlines()
for Wrds in g:
wrd = Wrds.split(' ')
dict[wrd[0].strip()] = sub('\n', '', wrd[1].strip())
return dict
def sub_alphabets(self, doc_string):
# try:
# doc_string = doc_string.decode('utf-8')
# except UnicodeEncodeError:
# pass
a0 = "ء"
b0 = "ئ"
c0 = sub(a0, b0, doc_string)
a1 = r"ٲ|ٱ|إ|ﺍ|أ"
a11 = r"ﺁ|آ"
b1 = r"ا"
b11 = r"آ"
c11 = sub(a11, b11, c0)
c1 = sub(a1, b1, c11)
a2 = r"ﺐ|ﺏ|ﺑ"
b2 = r"ب"
c2 = sub(a2, b2, c1)
a3 = r"ﭖ|ﭗ|ﭙ|ﺒ|ﭘ"
b3 = r"پ"
c3 = sub(a3, b3, c2)
a4 = r"ﭡ|ٺ|ٹ|ﭞ|ٿ|ټ|ﺕ|ﺗ|ﺖ|ﺘ"
b4 = r"ت"
c4 = sub(a4, b4, c3)
a5 = r"ﺙ|ﺛ"
b5 = r"ث"
c5 = sub(a5, b5, c4)
a6 = r"ﺝ|ڃ|ﺠ|ﺟ"
b6 = r"ج"
c6 = sub(a6, b6, c5)
a7 = r"ڃ|ﭽ|ﭼ"
b7 = r"چ"
c7 = sub(a7, b7, c6)
a8 = r"ﺢ|ﺤ|څ|ځ|ﺣ"
b8 = r"ح"
c8 = sub(a8, b8, c7)
a9 = r"ﺥ|ﺦ|ﺨ|ﺧ"
b9 = r"خ"
c9 = sub(a9, b9, c8)
a10 = r"ڏ|ډ|ﺪ|ﺩ"
b10 = r"د"
c10 = sub(a10, b10, c9)
a11 = r"ﺫ|ﺬ|ﻧ"
b11 = r"ذ"
c11 = sub(a11, b11, c10)
a12 = r"ڙ|ڗ|ڒ|ڑ|ڕ|ﺭ|ﺮ"
b12 = r"ر"
c12 = sub(a12, b12, c11)
a13 = r"ﺰ|ﺯ"
b13 = r"ز"
c13 = sub(a13, b13, c12)
a14 = r"ﮊ"
b14 = r"ژ"
c14 = sub(a14, b14, c13)
a15 = r"ݭ|ݜ|ﺱ|ﺲ|ښ|ﺴ|ﺳ"
b15 = r"س"
c15 = sub(a15, b15, c14)
a16 = r"ﺵ|ﺶ|ﺸ|ﺷ"
b16 = r"ش"
c16 = sub(a16, b16, c15)
a17 = r"ﺺ|ﺼ|ﺻ"
b17 = r"ص"
c17 = sub(a17, b17, c16)
a18 = r"ﺽ|ﺾ|ﺿ|ﻀ"
b18 = r"ض"
c18 = sub(a18, b18, c17)
a19 = r"ﻁ|ﻂ|ﻃ|ﻄ"
b19 = r"ط"
c19 = sub(a19, b19, c18)
a20 = r"ﻆ|ﻇ|ﻈ"
b20 = r"ظ"
c20 = sub(a20, b20, c19)
a21 = r"ڠ|ﻉ|ﻊ|ﻋ"
b21 = r"ع"
c21 = sub(a21, b21, c20)
a22 = r"ﻎ|ۼ|ﻍ|ﻐ|ﻏ"
b22 = r"غ"
c22 = sub(a22, b22, c21)
a23 = r"ﻒ|ﻑ|ﻔ|ﻓ"
b23 = r"ف"
c23 = sub(a23, b23, c22)
a24 = r"ﻕ|ڤ|ﻖ|ﻗ"
b24 = r"ق"
c24 = sub(a24, b24, c23)
a25 = r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ك"
b25 = r"ک"
c25 = sub(a25, b25, c24)
a26 = r"ﮚ|ﮒ|ﮓ|ﮕ|ﮔ"
b26 = r"گ"
c26 = sub(a26, b26, c25)
a27 = r"ﻝ|ﻞ|ﻠ|ڵ"
b27 = r"ل"
c27 = sub(a27, b27, c26)
a28 = r"ﻡ|ﻤ|ﻢ|ﻣ"
b28 = r"م"
c28 = sub(a28, b28, c27)
a29 = r"ڼ|ﻦ|ﻥ|ﻨ"
b29 = r"ن"
c29 = sub(a29, b29, c28)
a30 = r"ވ|ﯙ|ۈ|ۋ|ﺆ|ۊ|ۇ|ۏ|ۅ|ۉ|ﻭ|ﻮ|ؤ"
b30 = r"و"
c30 = sub(a30, b30, c29)
a31 = r"ﺔ|ﻬ|ھ|ﻩ|ﻫ|ﻪ|ۀ|ە|ة|ہ"
b31 = r"ه"
c31 = sub(a31, b31, c30)
a32 = r"ﭛ|ﻯ|ۍ|ﻰ|ﻱ|ﻲ|ں|ﻳ|ﻴ|ﯼ|ې|ﯽ|ﯾ|ﯿ|ێ|ے|ى|ي"
b32 = r"ی"
c32 = sub(a32, b32, c31)
a33 = r'¬'
b33 = r''
c33 = sub(a33, b33, c32)
pa0 = r'•|·|●|·|・|∙|。|ⴰ'
pb0 = r'.'
pc0 = sub(pa0, pb0, c33)
pa1 = r',|٬|٫|‚|,'
pb1 = r'،'
pc1 = sub(pa1, pb1, pc0)
pa2 = r'ʕ'
pb2 = r'؟'
pc2 = sub(pa2, pb2, pc1)
na0 = r'۰|٠'
nb0 = r'0'
nc0 = sub(na0, nb0, pc2)
na1 = r'۱|١'
nb1 = r'1'
nc1 = sub(na1, nb1, nc0)
na2 = r'۲|٢'
nb2 = r'2'
nc2 = sub(na2, nb2, nc1)
na3 = r'۳|٣'
nb3 = r'3'
nc3 = sub(na3, nb3, nc2)
na4 = r'۴|٤'
nb4 = r'4'
nc4 = sub(na4, nb4, nc3)
na5 = r'۵'
nb5 = r'5'
nc5 = sub(na5, nb5, nc4)
na6 = r'۶|٦'
nb6 = r'6'
nc6 = sub(na6, nb6, nc5)
na7 = r'۷|٧'
nb7 = r'7'
nc7 = sub(na7, nb7, nc6)
na8 = r'۸|٨'
nb8 = r'8'
nc8 = sub(na8, nb8, nc7)
na9 = r'۹|٩'
nb9 = r'9'
nc9 = sub(na9, nb9, nc8)
ea1 = r'ـ|ِ|ُ|َ|ٍ|ٌ|ً|'
eb1 = r''
ec1 = sub(ea1, eb1, nc9)
Sa1 = r'( )+'
Sb1 = r' '
Sc1 = sub(Sa1, Sb1, ec1)
Sa2 = r'(\n)+'
Sb2 = r'\n'
Sc2 = sub(Sa2, Sb2, Sc1)
return Sc2
def space_correction(self, doc_string):
a00 = r'^(بی|می|نمی)( )'
b00 = r'\1'
c00 = sub(a00, b00, doc_string)
a0 = r'( )(می|نمی|بی)( )'
b0 = r'\1\2'
c0 = sub(a0, b0, c00)
a1 = r'( )(هایی|ها|های|ایی|هایم|هایت|هایش|هایمان|هایتان|هایشان|ات|ان|ین' \
r'|انی|بان|ام|ای|یم|ید|اید|اند|بودم|بودی|بود|بودیم|بودید|بودند|ست)( )'
b1 = r'\2\3'
c1 = sub(a1, b1, c0)
a2 = r'( )(شده|نشده)( )'
b2 = r'\2'
c2 = sub(a2, b2, c1)
a3 = r'( )(طلبان|طلب|گرایی|گرایان|شناس|شناسی|گذاری|گذار|گذاران|شناسان|گیری|پذیری|بندی|آوری|سازی|' \
r'بندی|کننده|کنندگان|گیری|پرداز|پردازی|پردازان|آمیز|سنجی|ریزی|داری|دهنده|آمیز|پذیری' \
r'|پذیر|پذیران|گر|ریز|ریزی|رسانی|یاب|یابی|گانه|گانهای|انگاری|گا|بند|رسانی|دهندگان|دار)( )'
b3 = r'\2\3'
c3 = sub(a3, b3, c2)
return c3
def space_correction_plus1(self, doc_string):
out_sentences = ''
for wrd in doc_string.split(' '):
try:
out_sentences = out_sentences + ' ' + self.dic1[wrd]
except KeyError:
out_sentences = out_sentences + ' ' + wrd
return out_sentences
def space_correction_plus2(self, doc_string):
out_sentences = ''
wrds = doc_string.split(' ')
L = wrds.__len__()
if L < 2:
return doc_string
cnt = 1
for i in range(0, L - 1):
w = wrds[i] + wrds[i + 1]
try:
out_sentences = out_sentences + ' ' + self.dic2[w]
cnt = 0
except KeyError:
if cnt == 1:
out_sentences = out_sentences + ' ' + wrds[i]
cnt = 1
if cnt == 1:
out_sentences = out_sentences + ' ' + wrds[i + 1]
return out_sentences
def space_correction_plus3(self, doc_string):
# Dict = {'گفتوگو': 'گفتوگو'}
out_sentences = ''
wrds = doc_string.split(' ')
L = wrds.__len__()
if L < 3:
return doc_string
cnt = 1
cnt2 = 0
for i in range(0, L - 2):
w = wrds[i] + wrds[i + 1] + wrds[i + 2]
try:
out_sentences = out_sentences + ' ' + self.dic3[w]
cnt = 0
cnt2 = 2
except KeyError:
if cnt == 1 and cnt2 == 0:
out_sentences = out_sentences + ' ' + wrds[i]
else:
cnt2 -= 1
cnt = 1
if cnt == 1 and cnt2 == 0:
out_sentences = out_sentences + ' ' + wrds[i + 1] + ' ' + wrds[i + 2]
elif cnt == 1 and cnt2 == 1:
out_sentences = out_sentences + ' ' + wrds[i + 2]
return out_sentences
def normalize(self, doc_string, new_line_elimination=False):
normalized_string = self.sub_alphabets(doc_string)
normalized_string = self.data_helper.clean_text(normalized_string, new_line_elimination).strip()
if self.statistical_space_correction:
token_list = normalized_string.strip().split()
token_list = [x.strip("\u200c") for x in token_list if len(x.strip("\u200c")) != 0]
token_list = self.token_merger.merg_tokens(token_list, self.token_merger_model, self.half_space_char)
normalized_string = " ".join(x for x in token_list)
normalized_string = self.data_helper.clean_text(normalized_string, new_line_elimination)
else:
normalized_string = self.space_correction(self.space_correction_plus1(self.space_correction_plus2(self.space_correction_plus3(normalized_string)))).strip()
if self.pinglish_conversion_needed:
normalized_string = self.pinglish_conversion.pingilish2persian(self.tokenizer.tokenize_words(normalized_string))
if self.date_normalizing_needed:
normalized_string = self.date_normalizer.normalize_dates(self.date_normalizer.normalize_numbers(self.tokenizer.tokenize_words(normalized_string)).split())
return normalized_string
class DateNormalizer():
def __init__(self):
self.month_dict = {"فروردین": 1, "اردیبهشت": 2, "خرداد": 3,
"تیر": 4, "مرداد": 5, "شهریور": 6,
"مهر": 7, "آبان": 8, "آذر": 9,
"دی": 10, "بهمن": 11, "اسفند": 12}
self.num_dict = {"صد": 100, "هزار": 1000, "میلیون": 1000000, "دویست": 200,
"ده": 10, "نه": 9, "هشت": 8, "هفت": 7, "شش": 6, "پنج": 5,
"چهار": 4, "سه": 3, "دو": 2, "یک": 1, "یازده": 11, "سیزده": 13,
"چهارده": 14, "دوازده": 12, "پانزده": 15, "شانزده": 16, "هفده": 17,
"هجده": 18, "نوزده": 19, "بیست": 20, "سی": 30, "چهل": 40, "پنجاه": 50,
"شصت": 60, "هفتاد": 70, "نود": 90, "سیصد": 300, "چهارصد": 400,
"پانصد": 500, "ششصد": 600, "هفتصد": 700, "هشتصد": 800, "نهصد": 900,
"هشتاد": 80, " ": 0, "میلیارد": 1000000000,
"صدم": 100, "هزارم": 1000, "دویستم": 200,
"دهم": 10, "نهم": 9, "هشتم": 8, "هفتم": 7, "ششم": 6, "پنجم": 5,
"چهارم": 4, "سوم": 3, "دوم": 2, "یکم": 1, "اول": 1, "یازدهم": 11, "سیزدهم": 13,
"چهاردهم": 14, "دوازدهم": 12, "پانزدهم": 15, "شانزدهم": 16, "هفدهم": 17,
"هجدهم": 18, "نوزدهم": 19, "بیستم": 20, "چهلم": 40, "پنجاهم": 50,
"شصتم": 60, "هفتادم": 70, "نودم": 90, "سیصدم": 300, "چهارصدم": 400,
"پانصدم": 500, "ششصدم": 600, "هفتصدم": 700, "هشتصدم": 800, "نهصدم": 900,
"هشتادم": 80}
def find_date_part(self, token_list):
for index, element in enumerate(token_list):
if element == "/":
if index-1 >= 0 and index+1 < len(token_list) \
and token_list[index -1].isdigit() and token_list[index+1].isdigit():
if index+3 < len(token_list) and token_list[index+2] == "/" \
and token_list[index + 3].isdigit():
formal_date = [int(token_list[index-1]), int(token_list[index+1]), int(token_list[index+3])]
formal_date = "y" + str(formal_date[2]) + "m" + str(formal_date[1]) + "d" + str(formal_date[0])
return formal_date, index-1, index+3
else:
formal_date = [int(token_list[index-1]), int(token_list[index+ 1]), 0]
formal_date = "y" + str(formal_date[2]) + "m" + str(formal_date[1]) + "d" + str(formal_date[0])
return formal_date, index-1 , index+1
if element in self.month_dict or element == "سال":
if index + 1 < len(token_list) and index - 1 > -2:
try:
formal_date = [int(token_list[index - 1]), int(self.month_dict[token_list[index]]), int(token_list[index + 1])]
formal_date = "y" + str(formal_date[2]) + "m" + str(formal_date[1]) + "d" + str(formal_date[0])
if token_list[index - 1] and token_list[index + 1]:
return formal_date, index-1, index+1
except:
try:
formal_date = [int(token_list[index - 1]), int(self.month_dict[token_list[index]]), 0]
formal_date = "y" + str(formal_date[2]) + "m" + str(formal_date[1]) + "d" + str(formal_date[0])
return formal_date, index-1, index
except:
try:
if token_list[index] == "سال":
formal_date = [int(token_list[index + 1]),0, 0]
formal_date = "y" + str(formal_date[2]) + "m" + str(formal_date[1]) + "d" + str(formal_date[0])
return formal_date, index+1, index+1
else:
print("error")
except:
pass
def normalize_dates(self, token_list):
finded = self.find_date_part(token_list)
if finded != None:
date_part = finded[0]
start_date_index = finded[1]
end_date_index = finded[2]
befor_date_part = " ".join(x for x in token_list[:start_date_index])
after_date_part = [x for x in token_list[end_date_index + 1:]]
return befor_date_part + " " + date_part + " " + self.normalize_dates(after_date_part)
else:
return " ".join(x for x in token_list)
def list2num(self, numerical_section_list):
value = 1
for index, el in enumerate(numerical_section_list):
if self.is_number(el):
value *= self.num_dict[el]
else:
value *= float(el)
return value
def convert2num(self, numerical_section_list):
value = 0
tmp_section_list = []
for index, el in enumerate(numerical_section_list):
if self.is_number(el) or (el.replace('.', '', 1).isdigit()):
tmp_section_list.append(el)
elif el == "و":
value += self.list2num(tmp_section_list)
tmp_section_list[:] = []
if len(tmp_section_list) > 0:
value += self.list2num(tmp_section_list)
tmp_section_list[:] = []
if (value-int(value) == 0):
return int(value)
else:
return value
def is_number(self, word):
return word in self.num_dict
def find_number_location(self, token_list):
start_index = 0
number_section =[]
for i , el in enumerate(token_list):
if self.is_number(el) or (el.replace('.', '', 1).isdigit()):
start_index = i
number_section.append(start_index)
break
i = start_index+1
while(i < len(token_list)):
if token_list[i] == "و" and (i+1)<len(token_list):
if self.is_number(token_list[i+1]) or (token_list[i+1].replace('.', '', 1).isdigit()):
number_section.append(i)
number_section.append(i+1)
i += 2
else:
break
elif self.is_number(token_list[i]) or (token_list[i].replace('.', '', 1).isdigit()):
number_section.append(i)
i += 1
else:
break
return number_section
def normalize_numbers(self, token_list, converted=""):
for i, el in enumerate(token_list):
if el.endswith("ین") and self.is_number(el[:-2]):
token_list[i] = el[:-2]
finded = self.find_number_location(token_list)
if len(finded) == 0:
rest_of_string = " ".join(t for t in token_list)
return converted + " " + rest_of_string
else:
numerical_subsection = [token_list[x] for x in finded]
numerical_subsection = self.convert2num(numerical_subsection)
converted = converted + " " + " ".join(x for x in token_list[:finded[0]]) + " " + str(numerical_subsection)
new_index = finded[-1] + 1
return self.normalize_numbers(token_list[new_index:], converted)
class PinglishNormalizer():
def __init__(self):
self.data_helper = DataHelper()
self.file_dir = os.path.dirname(os.path.realpath(__file__)) + "/"
self.en_dict_filename = self.file_dir + "resource/tokenizer/enDict"
self.en_dict = self.data_helper.load_var(self.en_dict_filename)
self.fa_dict_filename = self.file_dir + "resource/tokenizer/faDict"
self.fa_dict = self.data_helper.load_var(self.fa_dict_filename)
def pingilish2persian(self, pinglish_words_list):
for i, word in enumerate(pinglish_words_list):
if word in self.en_dict:
pinglish_words_list[i] = self.en_dict[word]#.decode("utf-8")
#inp = inp.replace(word, enDict[word], 1)
else:
ch = self.characterize(word)
pr = self.map_char(ch)
amir = self.make_word(pr)
for wd in amir:
am = self.escalation(wd)
asd = ''.join(am)
if asd in self.fa_dict:
pinglish_words_list[i] = asd#.decode("utf-8")
#inp = inp.replace(word, asd, 1)
inp = " ".join(x for x in pinglish_words_list)
return inp
def characterize(self, word):
list_of_char = []
i = 0
while i < len(word):
char = word[i]
sw_out = self.switcher(char)
if (sw_out == None):
esp_out = None
if(i < len(word) - 1):
esp_out = self.esp_check(word[i], word[i + 1])
if(esp_out == None):
list_of_char.append(word[i])
else:
list_of_char.append(esp_out)
i += 1
else:
list_of_char.append(sw_out)
i += 1
return list_of_char
def switcher(self, ch):
switcher = {
"c": None,
"k": None,
"z": None,
"s": None,
"g": None,
"a": None,
"u": None,
"e": None,
"o": None
}
return switcher.get(ch, ch)
def esp_check(self, char1, char2):
st = char1 + char2
if (st == "ch"):
return "ch"
elif (st == "kh"):
return "kh"
elif (st == "zh"):
return "zh"
elif (st == "sh"):
return "sh"
elif (st == "gh"):
return "gh"
elif (st == "aa"):
return "aa"
elif (st == "ee"):
return "ee"
elif (st == "oo"):
return "oo"
elif (st == "ou"):
return "ou"
else:
return None
def map_char(self, word):
listm = []
sw_out = self.map_switcher(word[0])
i = 0
if (sw_out == None):
listm.append(["ا"])
i += 1
if (word[0] == "oo"):
listm.append(["او"])
i += 1
while i < len(word):
listm.append(self.char_switcher(word[i]))
i += 1
if word[len(word) - 1] == "e":
listm.append(["ه"])
elif word[len(word) - 1] == "a":
listm.append(["ا"])
elif word[len(word) - 1] == "o":
listm.append(["و"])
elif word[len(word) - 1] == "u":
listm.append(["و"])
return listm
def map_switcher(self, ch):
switcher = {
"a": None,
"e": None,
"o": None,
"u": None,
"ee": None,
"ou": None
}
return switcher.get(ch, ch)
def make_word(self, chp):
word_list = [[]]
for char in chp:
word_list_temp = []
for tmp_word_list in word_list:
for chch in char:
tmp = copy.deepcopy(tmp_word_list)
tmp.append(chch)
word_list_temp.append(tmp)
word_list = word_list_temp
return word_list
def escalation(self, word):
tmp = []
i = 0
t = len(word)
while i < t - 1:
tmp.append(word[i])
if word[i] == word[i + 1]:
i += 1
i += 1
if i != t:
tmp.append(word[i])
return tmp
def char_switcher(self, ch):
switcher = {
'a': ["", "ا"],
'c': ["ث", "ص", "ص"],
'h': ["ه", "ح"],
'b': ["ب"],
'p': ["پ"],
't': ["ت", "ط"],
's': ["س", "ص", "ث"],
'j': ["ج"],
'ch': ["چ"],
'kh': ["خ"],
'q': ["ق", "غ"],
'd': ["د"],
'z': ["ز", "ذ", "ض", "ظ"],
'r': ["ر"],
'zh': ["ژ"],
'sh': ["ش"],
'gh': [",ق", "غ"],
'f': ["ف"],
'k': ["ک"],
'g': ["گ"],
'l': ["ل"],
'm': ["م"],
'n': ["ن"],
'v': ["و"],
'aa': ["ا"],
'ee': ["ی"],
'oo': ["و"],
'ou': ["و"],
'i': ["ی"],
'y': ["ی"],
' ': [""],
'w': ["و"],
'e': ["", "ه"],
'o': ["", "و"]
}
return switcher.get(ch, "")
|
52b480c7864215325e293173930a36f355a88280
|
7bc1d8634529eac952490399fb71f10bcedf05cc
|
/tests/scripts/thread-cert/pktverify/verify_result.py
|
012cce407d4bc03d72a62a31427af1d7eec02028
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] |
permissive
|
openthread/openthread
|
6a9e25d1cd224bde9796d9616f04f423dba27d77
|
102a631cb3f8938389d0d10199a14c59184039cd
|
refs/heads/main
| 2023-08-18T10:46:03.820124
| 2023-08-17T22:20:55
| 2023-08-17T22:20:55
| 55,808,787
| 3,485
| 1,296
|
BSD-3-Clause
| 2023-09-14T15:50:53
| 2016-04-08T20:47:41
|
C++
|
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
verify_result.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pktverify.packet import Packet
from pktverify.packet_filter import PacketFilter
class VerifyResult(object):
"""
Represents a verification result of a packet.
"""
__slots__ = ('_packet_found', '_packet_indexes', '_seek_indexes')
def __init__(self):
self._packet_found = {}
self._packet_indexes = {}
self._seek_indexes = {}
def record_last(self, name: str, pkts: PacketFilter) -> None:
"""
Record the information of the last found packet.
:param name: The record name.
:param pkts: The packet filter.
"""
assert name not in self._packet_found, f'duplicate name: {name}'
self._packet_found[name] = pkts.last()
self._packet_indexes[name] = pkts.last_index
self._seek_indexes[name] = pkts.index
def packet_index(self, name: str) -> int:
"""
Returns a recorded packet index.
:param name: The record name.
:return: The packet index.
"""
return self._packet_indexes[name]
def packet(self, name: str) -> Packet:
"""
Returns the recorded packet.
:param name: The record name.
:return: The packet.
"""
return self._packet_found[name]
def seek_index(self, name: str) -> tuple:
"""
Returns the recorded seek index.
:param name: The record name.
:return: The seek index.
"""
return self._seek_indexes[name]
def __str__(self):
return "VerifyResult%s" % self._packet_found
|
b4845737c38b5c97dc166b3a263fe1f0c4b7f158
|
3daf74bdadb46f4aa18918f1b6938c714b331723
|
/poco/drivers/windows/__init__.py
|
4bb6c09a18ad4f62e4f67f1955e98ac05f7141b2
|
[
"Apache-2.0"
] |
permissive
|
AirtestProject/Poco
|
d173b465edefbae72f02bb11d60edfa5af8d4ec4
|
65c2c5be0c0c1de680eedf34ac18ae065c5408ee
|
refs/heads/master
| 2023-08-15T23:00:11.805669
| 2023-03-29T08:58:41
| 2023-03-29T08:58:41
| 118,706,014
| 1,703
| 312
|
Apache-2.0
| 2023-08-08T10:30:21
| 2018-01-24T03:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
__init__.py
|
# coding=utf-8
from poco.drivers.windows.windowsui_poco import WindowsPoco
|
8d14d7975d7923676f576cd11c063dce717afef4
|
d3aa7d6fdb3aeb627a5951ab4826108b58341d7f
|
/apps/base/models/location_models.py
|
1441c2225cf80e651e90e148df538ea418e91b73
|
[] |
no_license
|
JoneXiong/YouPBX
|
d92b58ea036570afbc2f7f4e52d6e8ca11a4ec7c
|
4e3156877e22e19959f6f8978feae53089e9b97c
|
refs/heads/master
| 2023-05-05T17:35:06.185313
| 2022-05-15T09:29:49
| 2022-05-15T09:29:49
| 67,493,869
| 269
| 124
| null | 2023-04-17T11:19:33
| 2016-09-06T09:29:34
|
Python
|
UTF-8
|
Python
| false
| false
| 476
|
py
|
location_models.py
|
# coding=utf-8
from django.db import models
'''
location & domain
'''
class Location(models.Model):
'''
Domain
'''
location_name = models.CharField(u'名称', max_length=64)
domain_name = models.CharField(u'域标识', max_length=64)
class Meta:
app_label = 'base'
verbose_name = u'域(Location)'
verbose_name_plural = verbose_name
def __unicode__(self):
return '%s (%s)'%(self.location_name, self.domain_name)
|
fc50c40aae77ed89987c77dfe3dbbe01482fe268
|
6a559bd73851b7a19c2a9ad1a995aa89b42915a0
|
/migrations/versions/59264050f39_.py
|
d714e320010745bdc4b5d16dd7dfee7ad2a8973f
|
[
"MIT"
] |
permissive
|
eleweek/WatchPeopleCode
|
b1825088eb225a6a737db5f9f9fe80dbc0a9138a
|
2389fe0b8eb040f553f847b9e1686883c4bd1388
|
refs/heads/master
| 2022-12-12T12:59:12.262107
| 2017-11-05T15:26:20
| 2017-11-05T15:26:20
| 29,928,164
| 200
| 24
|
MIT
| 2022-12-08T00:39:04
| 2015-01-27T18:26:00
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
59264050f39_.py
|
"""empty message
Revision ID: 59264050f39
Revises: 20e031b23d18
Create Date: 2015-02-04 17:18:39.703657
"""
# revision identifiers, used by Alembic.
revision = '59264050f39'
down_revision = '20e031b23d18'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('stream', sa.Column('title', sa.String(length=100), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('stream', 'title')
### end Alembic commands ###
|
1f19c3062ddd9a9789f8b50e3abd9217a72e84ab
|
27b86f422246a78704e0e84983b2630533a47db6
|
/examples/addons/dont_use_dimline_addons.py
|
2c0ee749c1f1a39723ffdf58abba68e30c8dac23
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
dont_use_dimline_addons.py
|
# Copyright (c) 2010-2022, Manfred Moitzi
# License: MIT License
import pathlib
import ezdxf
from ezdxf.addons import dimstyles, LinearDimension, AngularDimension
from ezdxf.addons import ArcDimension, RadialDimension
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# These add-ons are obsolete since the rendering of DIMENSION entities is
# supported by ezdxf but these add-ons will be preserved as they are!
# ------------------------------------------------------------------------------
# create a new drawing: dxfwrite.DXFEngine.drawing(filename)
NAME = "dimlines.dxf"
doc = ezdxf.new("R12")
msp = doc.modelspace()
def render(dimline):
dimline.render(msp)
# add block and layer definition to drawing
dimstyles.setup(doc)
# create a dimension line for following points
points = [(1.7, 2.5), (0, 0), (3.3, 6.9), (8, 12)]
# define new dimstyles, for predefined ticks see dimlines.py
dimstyles.new("dots", tick="DIMTICK_DOT", scale=1.0, roundval=2, textabove=0.5)
dimstyles.new("arrow", tick="DIMTICK_ARROW", tick2x=True, dimlineext=0.0)
dimstyles.new("dots2", tick="DIMTICK_DOT", tickfactor=0.5)
# add linear dimension lines
render(LinearDimension((3, 3), points, dimstyle="dots", angle=15.0))
render(LinearDimension((0, 3), points, angle=90.0))
render(LinearDimension((-2, 14), points, dimstyle="arrow", angle=-10))
# next dimline is added as anonymous block
block = doc.blocks.new_anonymous_block()
msp.add_blockref(block.name, insert=(0, 0), dxfattribs={"layer": "DIMENSIONS"})
dimline = LinearDimension((-2, 3), points, dimstyle="dots2", angle=90.0)
dimline.set_text(1, "CATCH")
dimline.render(block)
# add polyline to drawing
msp.add_polyline2d(points, dxfattribs={"color": 5})
# There are three dimstyle presets for angular dimension
# 'angle.deg' (default), 'angle.rad', 'angle.grad' (gon)
# for deg and grad default roundval = 0
# for rad default roundval = 3
# angular dimension in grad (gon)
render(
AngularDimension(
pos=(18, 5),
center=(15, 0),
start=(20, 0),
end=(20, 5),
dimstyle="angle.grad",
)
)
# angular dimension in degree (default dimstyle), with one fractional digit
render(
AngularDimension(
pos=(18, 10), center=(15, 5), start=(20, 5), end=(20, 10), roundval=1
)
)
render(
ArcDimension(
pos=(23, 5),
center=(20, 0),
start=(25, 0),
end=(25, 5),
dimstyle="dots2",
)
)
# RadiusDimension has a special tick
dimstyles.new("radius", height=0.25, prefix="R=")
render(RadialDimension((20, 0), (24, 1.5), dimstyle="radius"))
filepath = CWD / NAME
doc.saveas(filepath)
print(f"drawing '{filepath}' created.")
|
d4cb3114b129583810bf51233f1e35ed27d8fcdc
|
cc771eb1ed772713fbc303c8db828d5552186c7f
|
/mmdetection/yolov3_mobilenetv2_320_300e_coco.py
|
26965c0dcab9df0cd0b3b459939f91d417b8663b
|
[
"Apache-2.0"
] |
permissive
|
Lipurple/Grounded-Diffusion
|
49a48a0904f9eae1394e31d7503708ce84c4e005
|
7c4aa8c974171b74501c0f60db50a3a5a463a3df
|
refs/heads/main
| 2023-05-22T09:35:06.443563
| 2023-02-27T15:20:21
| 2023-02-27T15:20:21
| 587,403,131
| 119
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,183
|
py
|
yolov3_mobilenetv2_320_300e_coco.py
|
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
opencv_num_threads = 0
mp_start_method = 'fork'
auto_scale_lr = dict(enable=False, base_batch_size=192)
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset',
times=10,
dataset=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_train2017.json',
img_prefix='data/coco/train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
])),
val=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_val2017.json',
img_prefix='data/coco/val2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]),
test=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_val2017.json',
img_prefix='data/coco/val2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]))
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
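# --- Illustrative note (added, not part of the original config) ---------------
# A config file like the one above is normally consumed by MMDetection's config
# loader rather than imported directly. The file path below is an assumption for
# the sake of the example; only the pre-2.0 mmcv Config.fromfile() API is relied on.
#
#     from mmcv import Config
#     cfg = Config.fromfile('configs/yolo/yolov3_mobilenet_v2_320_300e_coco.py')
#     print(cfg.optimizer.lr, cfg.runner.max_epochs)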
|
b7904d157bee07cef5c5b209a1923225cf10178f | 64ab5b65afdf8d950c4b56ad2259133b95fc2fec | /zeus/api/resources/build_jobs.py | 63bf4c84c22f95175ffda9e95b10191a56131376 | ["Apache-2.0"] | permissive | getsentry/zeus | 3e88895443b23278fdb4c25121422ee214630512 | 6d4a490c19ebe406b551641a022ca08f26c21fcb | refs/heads/master | 2023-09-01T14:20:11.396306 | 2021-04-30T17:08:33 | 2021-04-30T17:08:33 | 96,131,433 | 222 | 27 | Apache-2.0 | 2022-06-01T03:17:16 | 2017-07-03T16:39:35 | Python | UTF-8 | Python | false | false | 1,467 | py | build_jobs.py |
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import subqueryload_all
from zeus.config import celery, db
from zeus.constants import Status
from zeus.models import Build, Job
from zeus.utils import timezone
from .base_build import BaseBuildResource
from ..schemas import JobSchema
job_schema = JobSchema()
jobs_schema = JobSchema(many=True)
class BuildJobsResource(BaseBuildResource):
def get(self, build: Build):
"""
Return a list of jobs for a given build.
"""
query = (
Job.query.options(subqueryload_all("stats"), subqueryload_all("failures"))
.filter(Job.build_id == build.id)
.order_by(Job.number.asc())
)
return self.respond_with_schema(jobs_schema, query)
def post(self, build: Build):
"""
Create a new job.
"""
data = self.schema_from_request(job_schema, partial=True)
job = Job(build=build, repository_id=build.repository_id, **data)
if job.status != Status.queued and not job.date_started:
job.date_started = timezone.now()
job.date_updated = timezone.now()
db.session.add(job)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return self.respond(status=422)
celery.delay("zeus.aggregate_build_stats_for_job", job_id=job.id)
return self.respond_with_schema(job_schema, job)
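# --- Illustrative note (added, not part of the original module) ----------------
# POST flow as implemented above: the request body is validated against
# JobSchema (partial=True), a Job row is created for the build, a start
# timestamp is filled in for anything already past "queued", the row is
# committed, and a celery task is enqueued to aggregate build stats. An
# IntegrityError (e.g. a uniqueness violation) rolls the session back and is
# answered with HTTP 422. A hypothetical request body accepted by this endpoint
# could look like:
#     {"provider": "travis", "external_id": "1234", "status": "in_progress"}
# (the exact field names depend on JobSchema and are assumptions here).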
|
edbb6262562b084b1fa20078e439588526ee1694 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/cloud/test_tts.py | ba88ae2af2d4b4b08e4dece15c8a0990fdf60428 | ["Apache-2.0"] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 3,324 | py | test_tts.py |
"""Tests for cloud tts."""
from unittest.mock import Mock
from hass_nabucasa import voice
import pytest
import voluptuous as vol
from homeassistant.components.cloud import const, tts
from homeassistant.core import HomeAssistant
@pytest.fixture
def cloud_with_prefs(cloud_prefs):
"""Return a cloud mock with prefs."""
return Mock(client=Mock(prefs=cloud_prefs))
def test_default_exists() -> None:
"""Test our default language exists."""
assert const.DEFAULT_TTS_DEFAULT_VOICE in voice.MAP_VOICE
def test_schema() -> None:
"""Test schema."""
assert "nl-NL" in tts.SUPPORT_LANGUAGES
processed = tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL"})
assert processed["gender"] == "female"
with pytest.raises(vol.Invalid):
tts.PLATFORM_SCHEMA(
{"platform": "cloud", "language": "non-existing", "gender": "female"}
)
with pytest.raises(vol.Invalid):
tts.PLATFORM_SCHEMA(
{"platform": "cloud", "language": "nl-NL", "gender": "not-supported"}
)
# Should not raise
tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL", "gender": "female"})
tts.PLATFORM_SCHEMA({"platform": "cloud"})
async def test_prefs_default_voice(
hass: HomeAssistant, cloud_with_prefs, cloud_prefs
) -> None:
"""Test cloud provider uses the preferences."""
assert cloud_prefs.tts_default_voice == ("en-US", "female")
tts_info = {"platform_loaded": Mock()}
provider_pref = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, tts_info
)
provider_conf = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}),
{"language": "fr-FR", "gender": "female"},
None,
)
assert provider_pref.default_language == "en-US"
assert provider_pref.default_options == {"gender": "female", "audio_output": "mp3"}
assert provider_conf.default_language == "fr-FR"
assert provider_conf.default_options == {"gender": "female", "audio_output": "mp3"}
await cloud_prefs.async_update(tts_default_voice=("nl-NL", "male"))
await hass.async_block_till_done()
assert provider_pref.default_language == "nl-NL"
assert provider_pref.default_options == {"gender": "male", "audio_output": "mp3"}
assert provider_conf.default_language == "fr-FR"
assert provider_conf.default_options == {"gender": "female", "audio_output": "mp3"}
async def test_provider_properties(cloud_with_prefs) -> None:
"""Test cloud provider."""
tts_info = {"platform_loaded": Mock()}
provider = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, tts_info
)
assert provider.supported_options == ["gender", "voice", "audio_output"]
assert "nl-NL" in provider.supported_languages
assert tts.Voice(
"ColetteNeural", "ColetteNeural"
) in provider.async_get_supported_voices("nl-NL")
async def test_get_tts_audio(cloud_with_prefs) -> None:
"""Test cloud provider."""
tts_info = {"platform_loaded": Mock()}
provider = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, tts_info
)
assert provider.supported_options == ["gender", "voice", "audio_output"]
assert "nl-NL" in provider.supported_languages
|
851d3aac3a0292152536abefcaa78142f813e2b3 | f509ab9825c542e09b0c6591d86ef1f9feb540a6 | /pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/nxos/ha.py | 5068d1f5cedf4f93455a5cb4a4ba109798a1ff16 | ["Apache-2.0"] | permissive | CiscoTestAutomation/genielibs | 97f597117193aaa18028defeb69078ebb241173a | e42e51475cddcb10f5c7814d0fe892ac865742ba | refs/heads/master | 2023-08-11T16:39:41.959947 | 2023-07-27T17:58:42 | 2023-07-27T17:58:42 | 130,717,047 | 109 | 60 | Apache-2.0 | 2023-08-29T22:32:08 | 2018-04-23T15:21:56 | Python | UTF-8 | Python | false | false | 23,042 | py | ha.py |
'''HA implementation functions for NXOS devices'''
# Parser
from genie.abstract import Lookup
from genie.libs import parser
from unicon.core.errors import SubCommandFailure
from unicon.eal.dialogs import Statement, Dialog
from os.path import basename, getsize
from datetime import datetime
import re
import logging
import time
import os
import stat
from ..ha import HA as HA_main
# genie
from genie.libs.sdk.libs.utils.common import set_filetransfer_attributes
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.sdk import apis
from genie.libs.sdk.apis.execute import execute_copy_run_to_start
from genie.libs.sdk.apis.utils import compare_config_dicts, get_config_dict
# pyats
from pyats.utils.objects import R, find
from pyats.utils.fileutils import FileUtils
from pyats.easypy import runtime
# unicon
from unicon.eal.dialogs import Statement, Dialog
from unicon.plugins.generic.statements import authentication_statement_list
# Parsergen
from genie.parsergen import oper_fill_tabular
# platform parser
from genie.libs.parser.nxos.show_platform import ShowModule, Dir
run_path = runtime.directory
os.chmod(run_path, os.stat(run_path)[stat.ST_MODE] | stat.S_IWOTH)
log = logging.getLogger(__name__)
class HA(HA_main):
def capture_core(self):
"""Verify if any core on the device and
upload the core file to linux if there is any
Example:
>>> capture_core()
"""
cores = self.check_and_upload_cores()
self.upload_to_server(device=self.device, cores=cores)
def check_cores(self):
"""Verify if any core on the device.
Returns:
Core files name
Example:
>>> check_cores()
"""
cores = []
# Execute command to check for cores
header = ["VDC", "Module", "Instance",
"Process-name", "PID", "Date\(Year-Month-Day Time\)"]
if self.device.alias == 'uut':
            # In case of restarting process on the main VDC
output = oper_fill_tabular(device=self.device,
show_command='show cores vdc-all',
header_fields=header, index=[5])
else:
# In case of restarting process on a sub-VDC
self.device.disconnect()
output = oper_fill_tabular(device=self.device,
show_command='show cores',
header_fields=header, index=[5])
if not output.entries:
log.info('No core found')
return []
# Parse through output to collect core information (if any)
for k in sorted(output.entries.keys(), reverse=True):
row = output.entries[k]
date = row.get("Date\(Year-Month-Day Time\)", None)
if not date:
continue
date_ = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
# Save core info
core_info = dict(module=row['Module'],
pid=row['PID'],
instance=row['Instance'],
process=row['Process-name'],
date=date.replace(" ", "_"))
cores.append(core_info)
return cores
def upload_core_to_linux(self, core):
"""Upload core files to composed path.
Args:
Mandatory:
core (`str`) : Core file
Example:
>>> upload_core_to_linux(core='RP_0_bt_logger_13899_20180112-184444-EST.core.gz')
"""
# if the setup was not done because the configure subsection did
# not run, then we do the setup here
if not hasattr(self.device, 'filetransfer_attributes'):
filetransfer = FileUtils.from_device(self.device)
set_filetransfer_attributes(self, self.device, filetransfer)
filename, core = self.get_upload_cmd(**core)
message = "Core dump upload attempt: {}".format(filename)
from_URL = 'core:{core}'.format(
core=core)
to_URL = '{protocol}://{address}/{path}/{filename}'.format(
protocol=self.device.filetransfer_attributes['protocol'],
address=self.device.filetransfer_attributes['server_address'],
path=self.device.filetransfer_attributes['path'],
filename=filename)
self.filetransfer.copyfile(device=self.device,
source=from_URL,
destination=to_URL)
def get_upload_cmd(self, module, pid, instance, date, process):
"""Compose the cores upload location
Args:
Mandatory:
module (`str`) : Module number
pid (`str`) : Process id number
instance (`str`) : Instance number
date (`str`) : Date time
process (`str`) : Process name
Returns:
Tuple of path and core information
Example:
>>> get_upload_cmd(module='27', pid='12345', instance='1',
date='Dec 12 2017', process='bgp')
"""
path = 'core_{pid}_{process}_{date}_{time}'.format(
pid=pid,
process=process,
date=date,
time=time.time())
if instance:
pid = '{pid}/{instance}'.format(pid=pid, instance=instance)
core = '//{module}/{pid}'.format(module=module, pid=pid)
return (path, core)
def clear_cores(self):
"""Clear cores.
Raises:
Unicon errors
Example:
>>> clear_cores()
"""
# Execute command to delete cores
self.device.execute('clear cores')
def check_module(self):
"""Check if all modules are ready with command "show module".
Raises:
AttributeError: No information from "show module"
AssertionError: Modules are not in status from
'ok', 'active', 'standby', 'ha-standby'
Example:
>>> check_module()
"""
show_module = ShowModule(device=self.device)
rp_expected_status = ['active', 'ha-standby']
lc_expected_status = ['ok', 'active', 'standby']
# get output
output = show_module.parse()
if 'slot' not in output:
raise AttributeError('Could not find slot for show module')
if 'rp' not in output['slot'] or 'lc' not in output['slot']:
raise AttributeError('Could not find rp or lc for show module')
for rp in output['slot']['rp']:
(module_type, value), = output['slot']['rp'][rp].items()
status = value['status']
assert status in rp_expected_status, \
'Module "{0}" has state "{1}" instead of expected state "{2}"'\
.format(rp, status, rp_expected_status)
for lc in output['slot']['lc']:
(module_type, value), = output['slot']['lc'][lc].items()
status = value['status']
assert status in lc_expected_status, \
'Module "{0}" has state "{1}" instead of expected state "{2}"'\
.format(lc, status, lc_expected_status)
def _switchover(self):
"""Do the switchover action for NXOS devices.
Raises:
Unicon errors
Example:
>>> _switchover()
"""
# Execute command to switchover
self.device.execute('system switchover')
def _reloadLc(self, lc):
"""Do the reload LC action for asr1k devices.
Args:
Mandatory:
lc (`str`) : LC slot number need to reload.
Raises:
Unicon errors
Example:
>>> _reloadLc(lc='27')
"""
# unicon
dialog = Dialog([
Statement(pattern=r'Proceed\[y\/n\]\?.*',
action='sendline(y)',
loop_continue=True,
continue_timer=False),
Statement(pattern=r'\(y\/n\)\?.*',
action='sendline(y)',
loop_continue=True,
continue_timer=False)
])
# Execute command to reload LC
self.device.execute('reload module {}'.format(lc), reply=dialog)
time.sleep(5)
def _reloadFabric(self, fabric):
"""Do the poweroff/no poweroff action for NXOS n7k devices.
Raises:
Unicon errors
Example:
>>> _reloadFabric()
"""
# Execute command to poweroff/on
self.device.configure(
'poweroff xbar {}\nno poweroff xbar {}'.format(fabric, fabric))
########################################################################
# ISSU #
########################################################################
def _prepare_issu(self, steps, upgrade_image):
"""Prepare the device for ISSU:
            1. Check current image version and upgrade image version
2. Copy upgrade image to standby RP
NXOS:
1. Copy image onto the device
Raises:
Unicon errors
Exception
Example:
>>> _prepare_issu(steps=steps, upgrade_image='someimage')
"""
# # Init
device = self.device
filetransfer = FileUtils.from_device(self.device)
if not hasattr(self.device, 'filetransfer_attributes'):
set_filetransfer_attributes(self, self.device, filetransfer)
disk = "bootflash:"
timeout_seconds = 600
with steps.start('Check available diskspace') as step:
dir_output = filetransfer.parsed_dir(disk, timeout_seconds, Dir)
if int(dir_output['disk_free_space']) < 4500000000:
step.failed(
"Not enough free space available to copy over the image.Free up atleast 4.5GB of space on {}".format(disk))
with steps.start('Copy over the issu image') as step:
# Copy ISSU upgrade image to disk
from_url = '{protocol}://{address}/{upgrade_image}'.format(
protocol=device.filetransfer_attributes['protocol'],
address=device.filetransfer_attributes['server_address'],
upgrade_image=upgrade_image)
            # File copy operations can optionally run through a different network stack by using the use-kstack option
use_kstack = self.parameters.get('use_kstack', False)
filetransfer.copyfile(source=from_url, destination=disk,
device=device, vrf='management', use_kstack=use_kstack, timeout_seconds=1800)
# Verify location:<filename> exists
output = device.execute('dir {disk}{image}'.format(disk=disk,
image=basename(upgrade_image)))
if 'No such file or directory' not in output:
log.info("Copied ISSU image to '{}'".format(disk))
else:
step.failed('Required ISSU image {} not found on disk. Transfer failed.'.format(
basename(upgrade_image)))
def _perform_issu(self, steps, upgrade_image, timeout=300):
"""Perform the ND-ISSU and Disruptive ISSU on NXOS device:
NXOS:
1. execute install all <> non-disruptive
2. execute install all <>
Raises:
Unicon errors
Exception
Example:
>>> _perform_issu(steps=steps, upgrade_image='someimage')
"""
# Init
lookup = Lookup.from_device(self.device)
filetransfer = FileUtils.from_device(self.device)
statement_list = authentication_statement_list + \
[Statement(pattern=r'.*Do you want to continue with the installation\s*\(y/n\)\?\s*\[n\]',
action='sendline(y)', loop_continue=True, continue_timer=False)] + \
[Statement(pattern=r'.*Do you want to overwrite\s*\(yes/no\)\?\s* \[no\]',
action='sendline(yes)', loop_continue=True, continue_timer=False)]
dialog = Dialog(statement_list)
ctrlplane_downtime = self.parameters.get('ctrlplane_downtime')
user_boot_mode = self.parameters.get('mode')
disrupt_flag = self.parameters.get('disrupt_flag', False)
allow_disruptive = self.parameters.get('allow_disruptive', True)
issu_timeout = self.parameters.get('issu_timeout')
cfg_transfer = self.parameters.get('cfg_transfer')
cfg_timeout = self.parameters.get('cfg_timeout')
config_ver_exclude = self.parameters.get("config_ver_exclude", [])
with steps.start("Check boot mode on {}".format(self.device.hostname)) as step:
invalid_cmd = False
out = self.device.execute('show boot mode')
# p1 matches line "Current mode is <native/lxc>."
p1 = re.compile(
r'^Current\smode\sis\s(?P<mode>\w+)\.$')
# p2 matches line "% Invalid command at '^' marker."
p2 = re.compile(r'.*?\'\^ \'\smarker\.')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
sys_boot_mode = m.groupdict()['mode']
break
m = p2.match(line)
if m:
invalid_cmd = True
break
if sys_boot_mode.lower() != user_boot_mode.lower():
step.failed(
"System boot mode {} does not match user expected boot mode {}".format(sys_boot_mode, user_boot_mode))
elif invalid_cmd and user_boot_mode.lower() != 'lxc':
step.failed("System only supports lxc mode. Invalid user expected boot mode input {}".format(
user_boot_mode))
else:
step.passed(
"System boot mode {} matches user expected boot mode {}".format(sys_boot_mode, user_boot_mode))
with steps.start("Take a running-config snapshot pre trigger on {}".format(self.device.hostname)) as step:
if cfg_transfer:
self.device.execute('show run > {}_pre_issu_trig.cfg'.format(self.device.hostname),
timeout=cfg_timeout, reply=dialog)
to_url = '{protocol}://{address}/{path}'.format(
protocol=self.device.filetransfer_attributes['protocol'],
address=self.device.filetransfer_attributes['server_address'],
path=runtime.directory)
filetransfer.copyfile(source='bootflash:/{}_pre_issu_trig.cfg'.format(self.device.hostname), destination=to_url,
device=self.device, vrf='management', timeout_seconds=600)
try:
with open("{}/{}_pre_issu_trig.cfg".format(runtime.directory, self.device.hostname), "r") as pre_trig_file:
pre_cfg_str = pre_trig_file.read()
except IOError:
step.failed(
"file not found.Please check path/content of the file")
pre_trig_config = get_config_dict(pre_cfg_str)
else:
out = self.device.execute(
'show run', timeout=cfg_timeout, reply=dialog)
pre_trig_config = get_config_dict(out)
with steps.start("Perform copy run start on {}".format(self.device.hostname)):
execute_copy_run_to_start(self.device)
if disrupt_flag:
with steps.start("Performing issu on the device {}".format(self.device.hostname)):
image_name = basename(upgrade_image)
self.device.execute(
'install all nxos bootflash:{}'.format(image_name), timeout=issu_timeout,
reply=dialog)
else:
with steps.start("Performing ISSU impact only check on the device {}".format(self.device.hostname)) as step:
image_name = basename(upgrade_image)
impact_output = self.device.execute(
"show install all impact nxos bootflash:{} non-disruptive".format(image_name), timeout=600)
#Parses impact after compatibility check is done
compatibility_table = re.findall(
'(?s)(?<=Compatibility check is done:)(.*?)(?=\r\n\r\n|\n\n)', impact_output)[0]
upgrade_will_be_disruptive = True if len(re.findall(
r"[^-]disruptive", compatibility_table)) else False
if upgrade_will_be_disruptive and not allow_disruptive:
step.failed(
"Upgrade will be disruptive and disruptive ISSU is not allowed")
#Allows previous install all instance to complete before proceeding
time.sleep(5)
with steps.start("Performing non disruptive issu on the device {}".format(self.device.hostname)):
image_name = basename(upgrade_image)
self.device.execute(
'install all nxos bootflash:{} non-disruptive'.format(image_name), timeout=issu_timeout, reply=dialog)
with steps.start("Reconnect back to device {} after ISSU".format(self.device.hostname)):
reconnect_timeout = Timeout(max_time=1200, interval=120)
self._reconnect(steps=steps, timeout=reconnect_timeout)
with steps.start("Verify image version on device {} after ISSU".format(self.device.hostname)):
version_dict = lookup.parser.show_platform.\
ShowVersion(device=self.device).parse()
# version check
rs = R(['platform', 'software', 'system_image_file',
'bootflash:///{}'.format(image_name)])
ret = find([version_dict], rs, filter_=False, all_keys=True)
if not ret:
raise Exception(
"Image version mismatch after ISSU on device {}".format(self.device.hostname))
with steps.start("Verify module status and config load status on device {} after ISSU".format(self.device.hostname)):
self.device.api.verify_module_status()
config_timeout = Timeout(max_time=180, interval=30)
while config_timeout.iterate():
try:
parsed = self.device.parse(
"show logging logfile | include 'System ready'")
except SchemaEmptyParserError as e:
log.info(
"command did not return any output\n{}".format(str(e)))
config_timeout.sleep()
continue
if parsed is not None:
log.info("{}".format(parsed.q.get_values('logs', -1)))
break
config_timeout.sleep()
if not disrupt_flag:
with steps.start("Check CP downtime after on {} after ISSU".format(self.device.hostname)) as step:
if user_boot_mode.lower() == 'lxc':
step.passed(
"show install all time-stats detail unsupported on lxc mode and cp downtime is minimal")
else:
#<show install all time-stats detail> requires a maximum of 60s to fully populate post ISSU
time.sleep(60)
out = self.device.execute(
'show install all time-stats detail')
output_error = False
cp_downtime = None
for line in out.splitlines():
line = line.rstrip()
p1 = re.compile(r'^ERROR:.*$')
m = p1.match(line)
if m:
output_error = True
break
p2 = re.compile(
r'^Total\s+.*?:\s(?P<cp_downtime>\d+)\s+seconds$')
m = p2.match(line)
if m:
cp_downtime = m.groupdict()['cp_downtime']
continue
if output_error:
step.failed(
"The output shows reset-reason as disruptive. ND ISSU was not performed properly.")
elif cp_downtime is None:
step.failed(
"garbled output for show install all time-stats detail so cp_downtime was not calculated properly.")
elif int(cp_downtime) > int(ctrlplane_downtime):
step.failed(
"Control plane was down for {} seconds which is longer than user expected at {} seconds".format(cp_downtime, ctrlplane_downtime))
else:
step.passed(
"Control plane was down for {} seconds which is within an user acceptable range of {} seconds".format(cp_downtime, ctrlplane_downtime))
with steps.start("Compare post-trigger config with pre trigger config snapshot on {}".format(self.device.hostname)) as step:
if cfg_transfer:
self.device.execute('show run > {}_post_issu_trig.cfg'.format(self.device.hostname),
timeout=cfg_timeout, reply=dialog)
to_url = '{protocol}://{address}/{path}'.format(
protocol=self.device.filetransfer_attributes['protocol'],
address=self.device.filetransfer_attributes['server_address'],
path=runtime.directory)
filetransfer.copyfile(source='bootflash:/{}_post_issu_trig.cfg'.format(self.device.hostname), destination=to_url,
device=self.device, vrf='management', timeout_seconds=600)
try:
with open("{}/{}_post_issu_trig.cfg".format(runtime.directory, self.device.hostname), "r") as post_trig_file:
post_cfg_str = post_trig_file.read()
except IOError:
step.failed(
"file not found. Please check path/content of the file")
post_trig_config = get_config_dict(post_cfg_str)
else:
out = self.device.execute(
'show run', timeout=cfg_timeout, reply=dialog)
post_trig_config = get_config_dict(out)
output = compare_config_dicts(
pre_trig_config, post_trig_config, [r'(boot|version)']+config_ver_exclude)
if output:
step.failed(
"Inconsistencies in running config post trigger:{}".format(output))
|
b15b9251c6ea0b14b6580b3322935f737d99e25e | 10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8 | /tests/unit/lib/utils/test_architecture.py | 860c513a07f6ed99b3eb3fd3ab11746df64fad97 | ["Apache-2.0", "BSD-3-Clause", "MIT", "BSD-2-Clause"] | permissive | aws/aws-sam-cli | 6d4411aacf7f861e75e5cf4882a32858797a276d | b297ff015f2b69d7c74059c2d42ece1c29ea73ee | refs/heads/develop | 2023-08-30T23:28:36.179932 | 2023-08-30T21:58:26 | 2023-08-30T21:58:26 | 92,205,085 | 1,402 | 470 | Apache-2.0 | 2023-09-14T21:14:23 | 2017-05-23T18:16:23 | Python | UTF-8 | Python | false | false | 2,434 | py | test_architecture.py |
from unittest import TestCase
from unittest.mock import Mock
from parameterized import parameterized
from samcli.commands.local.lib.exceptions import UnsupportedRuntimeArchitectureError
from samcli.lib.utils.architecture import (
ARM64,
InvalidArchitecture,
validate_architecture,
X86_64,
validate_architecture_runtime,
has_runtime_multi_arch_image,
)
from samcli.lib.utils.packagetype import ZIP, IMAGE
class TestArchitecture(TestCase):
"""
Tests for samcli.lib.utils.architecture
"""
def test_validate_architecture(self):
"""
Passing values
"""
validate_architecture(ARM64)
validate_architecture(X86_64)
@parameterized.expand([(None,), (""), ("unknown")])
def test_validate_architecture_errors(self, value):
"""
Invalid values
Parameters
----------
value : str
Value
"""
with self.assertRaises(InvalidArchitecture):
validate_architecture(value)
@parameterized.expand(
[
("nodejs14.x", X86_64, ZIP),
("java8.al2", ARM64, ZIP),
("dotnet6", ARM64, ZIP),
(None, X86_64, IMAGE),
(None, ARM64, IMAGE),
(None, X86_64, IMAGE),
]
)
def test_must_pass_for_support_runtime_architecture(self, runtime, arch, packagetype):
function = Mock(
functionname="name", handler="app.handler", runtime=runtime, packagetype=packagetype, architectures=[arch]
)
validate_architecture_runtime(function)
@parameterized.expand(
[
("python3.7", ARM64),
("java8", ARM64),
("go1.x", ARM64),
("provided", ARM64),
]
)
def test_must_raise_for_unsupported_runtime_architecture(self, runtime, arch):
function = Mock(
functionname="name", handler="app.handler", runtime=runtime, architectures=[arch], packagetype=ZIP
)
with self.assertRaises(UnsupportedRuntimeArchitectureError) as ex:
validate_architecture_runtime(function)
self.assertEqual(str(ex.exception), f"Runtime {runtime} is not supported on '{arch}' architecture")
@parameterized.expand([("python3.8", True), ("python3.9", True)])
def test_multi_arch_image(self, runtime, result):
self.assertEqual(has_runtime_multi_arch_image(runtime), result)
|
088ac256a3cf15785085c494bc69912280c63774 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/PrismaCloud/Integrations/PrismaCloudIAM/PrismaCloudIAM_test.py | ddf486565194d86947daf93ba2b32da169f0e000 | ["MIT"] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 13,964 | py | PrismaCloudIAM_test.py |
from requests import Response, Session
from PrismaCloudIAM import Client, get_mapping_fields
from IAMApiModule import *
APP_USER_OUTPUT = {
"user_id": "mock_id",
"user_name": "mock_user_name",
"first_name": "mock_first_name",
"last_name": "mock_last_name",
"active": "true",
"email": "testdemisto2@paloaltonetworks.com"
}
USER_APP_DATA = IAMUserAppData("mock_id", "mock_user_name", is_active=True, app_data=APP_USER_OUTPUT)
APP_DISABLED_USER_OUTPUT = {
"user_id": "mock_id",
"user_name": "mock_user_name",
"first_name": "mock_first_name",
"last_name": "mock_last_name",
"active": "false",
"email": "testdemisto2@paloaltonetworks.com"
}
DISABLED_USER_APP_DATA = IAMUserAppData("mock_id", "mock_user_name", is_active=False, app_data=APP_DISABLED_USER_OUTPUT)
# helper methods
def mock_client(mocker):
mocker.patch.object(Client, 'login', return_value={})
client = Client(base_url='https://test.com',
username='test',
password='test',
customer_name='test',
headers={})
return client
def get_outputs_from_user_profile(user_profile):
entry_context = user_profile.to_entry()
outputs = entry_context.get('Contents')
return outputs
# test classes
class TestGetUserCommand:
"""
Class to group the get user commands test
"""
def test_get_user_command__existing_user(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- The user exists in the application
- Calling function get_user_command
Then:
        - Ensure the resulting User Profile object holds the correct user details
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}
mocker.patch.object(client, 'get_user', return_value=USER_APP_DATA)
mocker.patch.object(IAMUserProfile, 'update_with_app_data', return_value={})
user_profile = IAMCommand().get_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.GET_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_get_user_command__non_existing_user(self, mocker):
"""
Given:
- An app client object
        - A user-profile argument that contains an email of a user
When:
- The user does not exist in the application
- Calling function get_user_command
Then:
        - Ensure the resulting User Profile object holds information about an unsuccessful result.
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}
mocker.patch.object(client, 'get_user', return_value=None)
user_profile = IAMCommand().get_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.GET_USER
assert outputs.get('success') is False
assert outputs.get('errorCode') == IAMErrors.USER_DOES_NOT_EXIST[0]
assert outputs.get('errorMessage') == IAMErrors.USER_DOES_NOT_EXIST[1]
def test_get_user_command__bad_response(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a non-existing user in the application
When:
- Calling function get_user_command
- A bad response (500) is returned from the application API
Then:
        - Ensure the resulting User Profile object holds information about the bad response.
"""
import demistomock as demisto
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}
bad_response = Response()
bad_response.headers = {'x-redlock-status': json.dumps([{'i18nKey': 'bad request'}])}
bad_response.status_code = 500
mocker.patch.object(demisto, 'error')
mocker.patch.object(Session, 'request', return_value=bad_response)
user_profile = IAMCommand().get_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.GET_USER
assert outputs.get('success') is False
assert outputs.get('errorCode') == 500
assert 'Error in API call [500] - bad request' in outputs.get('errorMessage')
class TestCreateUserCommand:
"""
Class to group the create user commands test
"""
def test_create_user_command__success(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a non-existing user in the application
When:
- Calling function create_user_command
Then:
- Ensure a User Profile object with the user data is returned
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={})
mocker.patch.object(client, 'create_user', return_value=USER_APP_DATA)
user_profile = IAMCommand().create_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_create_user_command__user_already_exists(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- The user already exists in the application and disabled
- allow-enable argument is false
- Calling function create_user_command
Then:
- Ensure the command is considered successful and the user is still disabled
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}, 'allow-enable': 'false'}
mocker.patch.object(client, 'get_user', return_value=DISABLED_USER_APP_DATA)
mocker.patch.object(client, 'update_user', return_value=DISABLED_USER_APP_DATA)
user_profile = IAMCommand().create_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is False
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
class TestUpdateUserCommand:
"""
Class to group the update user commands test
"""
def test_update_user_command__non_existing_user(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains user data
When:
- The user does not exist in the application
- create-if-not-exists parameter is checked
- Create User command is enabled
- Calling function update_user_command
Then:
- Ensure the create action is executed
- Ensure a User Profile object with the user data is returned
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={})
mocker.patch.object(client, 'create_user', return_value=USER_APP_DATA)
user_profile = IAMCommand(create_if_not_exists=True).update_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_update_user_command__command_is_disabled(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains user data
When:
- Update User command is disabled
- Calling function update_user_command
Then:
- Ensure the command is considered successful and skipped
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={})
mocker.patch.object(client, 'update_user', return_value=USER_APP_DATA)
user_profile = IAMCommand(is_update_enabled=False).update_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('skipped') is True
assert outputs.get('reason') == 'Command is disabled.'
def test_update_user_command__allow_enable(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains user data
When:
- The user is disabled in the application
- allow-enable argument is true
- Calling function update_user_command
Then:
- Ensure the user is enabled at the end of the command execution.
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'},
'allow-enable': 'true'}
mocker.patch.object(client, 'get_user', return_value=DISABLED_USER_APP_DATA)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={})
mocker.patch.object(client, 'enable_user', return_value=USER_APP_DATA)
mocker.patch.object(client, 'update_user', return_value=USER_APP_DATA)
user_profile = IAMCommand().update_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
class TestDisableUserTest:
"""
Class to group the disable user commands test
"""
def test_disable_user_command__non_existing_user(self, mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- create-if-not-exists parameter is unchecked
- The user does not exist in the application
- Calling function disable_user_command
Then:
- Ensure the command is considered successful and skipped
"""
client = mock_client(mocker)
args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}
mocker.patch.object(client, 'get_user', return_value=None)
user_profile = IAMCommand().disable_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.DISABLE_USER
assert outputs.get('success') is True
assert outputs.get('skipped') is True
assert outputs.get('reason') == IAMErrors.USER_DOES_NOT_EXIST[1]
class TestGetMappingFieldsTest:
"""
Class to group the disable user commands test
"""
def test_get_mapping_fields_command(self, mocker):
"""
Given:
- An app client object
When:
- User schema in the application contains the fields 'field1' and 'field2'
- Calling function get_mapping_fields_command
Then:
- Ensure a GetMappingFieldsResponse object that contains the application fields is returned
"""
client = mock_client(mocker)
mocker.patch.object(client, 'get_app_fields', return_value={'field1': 'desc1', 'field2': 'desc2'})
mapping_response = get_mapping_fields(client)
mapping = mapping_response.extract_mapping()
assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get('field1') == 'desc1'
assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get('field2') == 'desc2'
|
edbe746710ae80812d7618940092b25068883542 | 64e5f76a15d0178e851d64573196a33043e68164 | /lib/build/__init__.py | 35375e8659eb76fe9650f934ec0fe24306c43d9d | ["BSD-2-Clause"] | permissive | ganeti/ganeti | 759aa20d2d3e15c816fa3ba5019d7d143b2d1294 | 456ea285a7583183c2c8e5bcffe9006ec8a9d658 | refs/heads/master | 2023-07-24T21:23:49.389657 | 2023-05-25T15:37:44 | 2023-05-28T10:17:36 | 25,163,509 | 465 | 126 | BSD-2-Clause | 2023-08-18T17:06:33 | 2014-10-13T15:03:51 | Python | UTF-8 | Python | false | false | 1,843 | py | __init__.py |
#
#
# Copyright (C) 2009 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module used during the Ganeti build process"""
import imp
import os
def LoadModule(filename):
"""Loads an external module by filename.
Use this function with caution. Python will always write the compiled source
to a file named "${filename}c".
@type filename: string
@param filename: Path to module
"""
(name, ext) = os.path.splitext(filename)
fh = open(filename, "r")
try:
return imp.load_module(name, fh, filename, (ext, "r", imp.PY_SOURCE))
finally:
fh.close()
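# Illustrative usage sketch (added, not part of the original module); the path
# below is hypothetical and only stands in for any Python source file:
#
#   constants = LoadModule("lib/_constants.py")
#   print(constants.__name__)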
|
617a409f3cafef6c5aa10cd598b83f652067c8bc | c0e45bc202a50f4b0dcc645b5f805596d10958b8 | /losses.py | 7404b39e49a493237d9ecf7dbb5cd66dcf922b8d | ["Apache-2.0"] | permissive | visinf/self-mono-sf | 532d3a09ebdce9abb4177517ba521f5f2dc66687 | eec356d95038da49e6705194e6dc0780b750f2b0 | refs/heads/master | 2022-05-17T07:47:37.688855 | 2022-04-18T07:45:27 | 2022-04-18T07:45:27 | 246,596,511 | 240 | 55 | Apache-2.0 | 2022-04-18T07:45:28 | 2020-03-11T14:40:46 | Python | UTF-8 | Python | false | false | 60,956 | py | losses.py |
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as tf
from models.forwardwarp_package.forward_warp import forward_warp
from utils.interpolation import interpolate2d_as
from utils.sceneflow_util import pixel2pts_ms, pts2pixel_ms, reconstructImg, reconstructPts, projectSceneFlow2Flow
from utils.monodepth_eval import compute_errors, compute_d1_all
from models.modules_sceneflow import WarpingLayer_Flow
###############################################
## Basic Module
###############################################
def _elementwise_epe(input_flow, target_flow):
residual = target_flow - input_flow
return torch.norm(residual, p=2, dim=1, keepdim=True)
def _elementwise_l1(input_flow, target_flow):
residual = target_flow - input_flow
return torch.norm(residual, p=1, dim=1, keepdim=True)
def _elementwise_robust_epe_char(input_flow, target_flow):
residual = target_flow - input_flow
return torch.pow(torch.norm(residual, p=2, dim=1, keepdim=True) + 0.01, 0.4)
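# Illustrative note (added, not in the original file): for flow tensors of shape
# [N, 2, H, W] the helpers above reduce over the channel dimension, e.g.
#   _elementwise_epe(torch.zeros(1, 2, 1, 1), torch.ones(1, 2, 1, 1))
# yields a [1, 1, 1, 1] tensor holding sqrt(2), the Euclidean distance between
# the two flow vectors, while the "robust" variant applies (epe + 0.01) ** 0.4.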
def _SSIM(x, y):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
mu_x = nn.AvgPool2d(3, 1)(x)
mu_y = nn.AvgPool2d(3, 1)(y)
mu_x_mu_y = mu_x * mu_y
mu_x_sq = mu_x.pow(2)
mu_y_sq = mu_y.pow(2)
sigma_x = nn.AvgPool2d(3, 1)(x * x) - mu_x_sq
sigma_y = nn.AvgPool2d(3, 1)(y * y) - mu_y_sq
sigma_xy = nn.AvgPool2d(3, 1)(x * y) - mu_x_mu_y
SSIM_n = (2 * mu_x_mu_y + C1) * (2 * sigma_xy + C2)
SSIM_d = (mu_x_sq + mu_y_sq + C1) * (sigma_x + sigma_y + C2)
SSIM = SSIM_n / SSIM_d
SSIM_img = torch.clamp((1 - SSIM) / 2, 0, 1)
return tf.pad(SSIM_img, pad=(1, 1, 1, 1), mode='constant', value=0)
def _apply_disparity(img, disp):
batch_size, _, height, width = img.size()
# Original coordinates of pixels
x_base = torch.linspace(0, 1, width).repeat(batch_size, height, 1).type_as(img)
y_base = torch.linspace(0, 1, height).repeat(batch_size, width, 1).transpose(1, 2).type_as(img)
# Apply shift in X direction
x_shifts = disp[:, 0, :, :] # Disparity is passed in NCHW format with 1 channel
flow_field = torch.stack((x_base + x_shifts, y_base), dim=3)
# In grid_sample coordinates are assumed to be between -1 and 1
output = tf.grid_sample(img, 2 * flow_field - 1, mode='bilinear', padding_mode='zeros')
return output
def _generate_image_left(img, disp):
return _apply_disparity(img, -disp)
def _adaptive_disocc_detection(flow):
# init mask
b, _, h, w, = flow.size()
mask = torch.ones(b, 1, h, w, dtype=flow.dtype, device=flow.device).float().requires_grad_(False)
flow = flow.transpose(1, 2).transpose(2, 3)
disocc = torch.clamp(forward_warp()(mask, flow), 0, 1)
disocc_map = (disocc > 0.5)
if disocc_map.float().sum() < (b * h * w / 2):
disocc_map = torch.ones(b, 1, h, w, dtype=torch.bool, device=flow.device).requires_grad_(False)
return disocc_map
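# Note (added): the disocclusion heuristic above forward-warps an all-ones mask
# with the given flow; pixels that receive less than 0.5 accumulated weight are
# treated as disoccluded. If more than half of the image would end up masked,
# the map is considered unreliable and replaced by an all-ones (fully visible) mask.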
def _adaptive_disocc_detection_disp(disp):
# # init
b, _, h, w, = disp.size()
mask = torch.ones(b, 1, h, w, dtype=disp.dtype, device=disp.device).float().requires_grad_(False)
flow = torch.zeros(b, 2, h, w, dtype=disp.dtype, device=disp.device).float().requires_grad_(False)
flow[:, 0:1, :, : ] = disp * w
flow = flow.transpose(1, 2).transpose(2, 3)
disocc = torch.clamp(forward_warp()(mask, flow), 0, 1)
disocc_map = (disocc > 0.5)
if disocc_map.float().sum() < (b * h * w / 2):
disocc_map = torch.ones(b, 1, h, w, dtype=torch.bool, device=disp.device).requires_grad_(False)
return disocc_map
def _gradient_x(img):
img = tf.pad(img, (0, 1, 0, 0), mode="replicate")
gx = img[:, :, :, :-1] - img[:, :, :, 1:] # NCHW
return gx
def _gradient_y(img):
img = tf.pad(img, (0, 0, 0, 1), mode="replicate")
gy = img[:, :, :-1, :] - img[:, :, 1:, :] # NCHW
return gy
def _gradient_x_2nd(img):
img_l = tf.pad(img, (1, 0, 0, 0), mode="replicate")[:, :, :, :-1]
img_r = tf.pad(img, (0, 1, 0, 0), mode="replicate")[:, :, :, 1:]
gx = img_l + img_r - 2 * img
return gx
def _gradient_y_2nd(img):
img_t = tf.pad(img, (0, 0, 1, 0), mode="replicate")[:, :, :-1, :]
img_b = tf.pad(img, (0, 0, 0, 1), mode="replicate")[:, :, 1:, :]
gy = img_t + img_b - 2 * img
return gy
def _smoothness_motion_2nd(sf, img, beta=1):
sf_grad_x = _gradient_x_2nd(sf)
sf_grad_y = _gradient_y_2nd(sf)
img_grad_x = _gradient_x(img)
img_grad_y = _gradient_y(img)
weights_x = torch.exp(-torch.mean(torch.abs(img_grad_x), 1, keepdim=True) * beta)
weights_y = torch.exp(-torch.mean(torch.abs(img_grad_y), 1, keepdim=True) * beta)
smoothness_x = sf_grad_x * weights_x
smoothness_y = sf_grad_y * weights_y
return (smoothness_x.abs() + smoothness_y.abs())
def _disp2depth_kitti_K(disp, k_value):
mask = (disp > 0).float()
depth = k_value.unsqueeze(1).unsqueeze(1).unsqueeze(1) * 0.54 / (disp + (1.0 - mask))
return depth
def _depth2disp_kitti_K(depth, k_value):
disp = k_value.unsqueeze(1).unsqueeze(1).unsqueeze(1) * 0.54 / depth
return disp
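# Illustrative sanity check (added, not part of the original file): the two
# helpers above encode the usual KITTI stereo relation
#     depth = f_x * 0.54 / disparity,
# with 0.54 m being the KITTI stereo baseline, so disparity -> depth -> disparity
# should round-trip wherever disparity > 0. The focal length value below is made
# up for the example.
def _example_disp_depth_roundtrip():
    disp = torch.rand(2, 1, 8, 8) + 0.1  # strictly positive disparities
    k_value = torch.tensor([721.5, 721.5])  # one (assumed) focal length per batch item
    roundtrip = _depth2disp_kitti_K(_disp2depth_kitti_K(disp, k_value), k_value)
    return torch.allclose(disp, roundtrip, atol=1e-5)  # expected: True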
###############################################
## Loss function
###############################################
class Loss_SceneFlow_SelfSup(nn.Module):
def __init__(self, args):
super(Loss_SceneFlow_SelfSup, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._disp_smooth_w = 0.1
self._sf_3d_pts = 0.2
self._sf_3d_sm = 200
def depth_loss_left_img(self, disp_l, disp_r, img_l_aug, img_r_aug, ii):
img_r_warp = _generate_image_left(img_r_aug, disp_l)
left_occ = _adaptive_disocc_detection_disp(disp_r).detach()
## Photometric loss
img_diff = (_elementwise_l1(img_l_aug, img_r_warp) * (1.0 - self._ssim_w) + _SSIM(img_l_aug, img_r_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_img = (img_diff[left_occ]).mean()
img_diff[~left_occ].detach_()
## Disparities smoothness
loss_smooth = _smoothness_motion_2nd(disp_l, img_l_aug, beta=10.0).mean() / (2 ** ii)
return loss_img + self._disp_smooth_w * loss_smooth, left_occ
def sceneflow_loss(self, sf_f, sf_b, disp_l1, disp_l2, disp_occ_l1, disp_occ_l2, k_l1_aug, k_l2_aug, img_l1_aug, img_l2_aug, aug_size, ii):
_, _, h_dp, w_dp = sf_f.size()
disp_l1 = disp_l1 * w_dp
disp_l2 = disp_l2 * w_dp
## scale
local_scale = torch.zeros_like(aug_size)
local_scale[:, 0] = h_dp
local_scale[:, 1] = w_dp
pts1, k1_scale = pixel2pts_ms(k_l1_aug, disp_l1, local_scale / aug_size)
pts2, k2_scale = pixel2pts_ms(k_l2_aug, disp_l2, local_scale / aug_size)
_, pts1_tf, coord1 = pts2pixel_ms(k1_scale, pts1, sf_f, [h_dp, w_dp])
_, pts2_tf, coord2 = pts2pixel_ms(k2_scale, pts2, sf_b, [h_dp, w_dp])
pts2_warp = reconstructPts(coord1, pts2)
pts1_warp = reconstructPts(coord2, pts1)
flow_f = projectSceneFlow2Flow(k1_scale, sf_f, disp_l1)
flow_b = projectSceneFlow2Flow(k2_scale, sf_b, disp_l2)
occ_map_b = _adaptive_disocc_detection(flow_f).detach() * disp_occ_l2
occ_map_f = _adaptive_disocc_detection(flow_b).detach() * disp_occ_l1
## Image reconstruction loss
img_l2_warp = reconstructImg(coord1, img_l2_aug)
img_l1_warp = reconstructImg(coord2, img_l1_aug)
img_diff1 = (_elementwise_l1(img_l1_aug, img_l2_warp) * (1.0 - self._ssim_w) + _SSIM(img_l1_aug, img_l2_warp) * self._ssim_w).mean(dim=1, keepdim=True)
img_diff2 = (_elementwise_l1(img_l2_aug, img_l1_warp) * (1.0 - self._ssim_w) + _SSIM(img_l2_aug, img_l1_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_im1 = img_diff1[occ_map_f].mean()
loss_im2 = img_diff2[occ_map_b].mean()
img_diff1[~occ_map_f].detach_()
img_diff2[~occ_map_b].detach_()
loss_im = loss_im1 + loss_im2
## Point reconstruction Loss
pts_norm1 = torch.norm(pts1, p=2, dim=1, keepdim=True)
pts_norm2 = torch.norm(pts2, p=2, dim=1, keepdim=True)
pts_diff1 = _elementwise_epe(pts1_tf, pts2_warp).mean(dim=1, keepdim=True) / (pts_norm1 + 1e-8)
pts_diff2 = _elementwise_epe(pts2_tf, pts1_warp).mean(dim=1, keepdim=True) / (pts_norm2 + 1e-8)
loss_pts1 = pts_diff1[occ_map_f].mean()
loss_pts2 = pts_diff2[occ_map_b].mean()
pts_diff1[~occ_map_f].detach_()
pts_diff2[~occ_map_b].detach_()
loss_pts = loss_pts1 + loss_pts2
## 3D motion smoothness loss
loss_3d_s = ( (_smoothness_motion_2nd(sf_f, img_l1_aug, beta=10.0) / (pts_norm1 + 1e-8)).mean() + (_smoothness_motion_2nd(sf_b, img_l2_aug, beta=10.0) / (pts_norm2 + 1e-8)).mean() ) / (2 ** ii)
        ## Loss Summation
sceneflow_loss = loss_im + self._sf_3d_pts * loss_pts + self._sf_3d_sm * loss_3d_s
return sceneflow_loss, loss_im, loss_pts, loss_3d_s
def detaching_grad_of_outputs(self, output_dict):
for ii in range(0, len(output_dict['flow_f'])):
output_dict['flow_f'][ii].detach_()
output_dict['flow_b'][ii].detach_()
output_dict['disp_l1'][ii].detach_()
output_dict['disp_l2'][ii].detach_()
return None
def forward(self, output_dict, target_dict):
loss_dict = {}
batch_size = target_dict['input_l1'].size(0)
loss_sf_sum = 0
loss_dp_sum = 0
loss_sf_2d = 0
loss_sf_3d = 0
loss_sf_sm = 0
k_l1_aug = target_dict['input_k_l1_aug']
k_l2_aug = target_dict['input_k_l2_aug']
aug_size = target_dict['aug_size']
disp_r1_dict = output_dict['output_dict_r']['disp_l1']
disp_r2_dict = output_dict['output_dict_r']['disp_l2']
for ii, (sf_f, sf_b, disp_l1, disp_l2, disp_r1, disp_r2) in enumerate(zip(output_dict['flow_f'], output_dict['flow_b'], output_dict['disp_l1'], output_dict['disp_l2'], disp_r1_dict, disp_r2_dict)):
assert(sf_f.size()[2:4] == sf_b.size()[2:4])
assert(sf_f.size()[2:4] == disp_l1.size()[2:4])
assert(sf_f.size()[2:4] == disp_l2.size()[2:4])
## For image reconstruction loss
img_l1_aug = interpolate2d_as(target_dict["input_l1_aug"], sf_f)
img_l2_aug = interpolate2d_as(target_dict["input_l2_aug"], sf_b)
img_r1_aug = interpolate2d_as(target_dict["input_r1_aug"], sf_f)
img_r2_aug = interpolate2d_as(target_dict["input_r2_aug"], sf_b)
## Disp Loss
loss_disp_l1, disp_occ_l1 = self.depth_loss_left_img(disp_l1, disp_r1, img_l1_aug, img_r1_aug, ii)
loss_disp_l2, disp_occ_l2 = self.depth_loss_left_img(disp_l2, disp_r2, img_l2_aug, img_r2_aug, ii)
loss_dp_sum = loss_dp_sum + (loss_disp_l1 + loss_disp_l2) * self._weights[ii]
## Sceneflow Loss
loss_sceneflow, loss_im, loss_pts, loss_3d_s = self.sceneflow_loss(sf_f, sf_b,
disp_l1, disp_l2,
disp_occ_l1, disp_occ_l2,
k_l1_aug, k_l2_aug,
img_l1_aug, img_l2_aug,
aug_size, ii)
loss_sf_sum = loss_sf_sum + loss_sceneflow * self._weights[ii]
loss_sf_2d = loss_sf_2d + loss_im
loss_sf_3d = loss_sf_3d + loss_pts
loss_sf_sm = loss_sf_sm + loss_3d_s
# finding weight
f_loss = loss_sf_sum.detach()
d_loss = loss_dp_sum.detach()
max_val = torch.max(f_loss, d_loss)
f_weight = max_val / f_loss
d_weight = max_val / d_loss
total_loss = loss_sf_sum * f_weight + loss_dp_sum * d_weight
loss_dict = {}
loss_dict["dp"] = loss_dp_sum
loss_dict["sf"] = loss_sf_sum
loss_dict["s_2"] = loss_sf_2d
loss_dict["s_3"] = loss_sf_3d
loss_dict["s_3s"] = loss_sf_sm
loss_dict["total_loss"] = total_loss
self.detaching_grad_of_outputs(output_dict['output_dict_r'])
return loss_dict
class Loss_SceneFlow_SemiSupFinetune(nn.Module):
def __init__(self, args):
super(Loss_SceneFlow_SemiSupFinetune, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._unsup_loss = Loss_SceneFlow_SelfSup(args)
def forward(self, output_dict, target_dict):
loss_dict = {}
unsup_loss_dict = self._unsup_loss(output_dict, target_dict)
unsup_loss = unsup_loss_dict['total_loss']
## Ground Truth
gt_disp1 = target_dict['target_disp']
gt_disp1_mask = (target_dict['target_disp_mask']==1).float()
gt_disp2 = target_dict['target_disp2_occ']
gt_disp2_mask = (target_dict['target_disp2_mask_occ']==1).float()
gt_flow = target_dict['target_flow']
gt_flow_mask = (target_dict['target_flow_mask']==1).float()
b, _, h_dp, w_dp = gt_disp1.size()
disp_loss = 0
flow_loss = 0
for ii, sf_f in enumerate(output_dict['flow_f_pp']):
## disp1
disp_l1 = interpolate2d_as(output_dict["disp_l1_pp"][ii], gt_disp1, mode="bilinear") * w_dp
valid_abs_rel = torch.abs(gt_disp1 - disp_l1) * gt_disp1_mask
valid_abs_rel[gt_disp1_mask == 0].detach_()
disp_l1_loss = valid_abs_rel[gt_disp1_mask != 0].mean()
## Flow Loss
sf_f_up = interpolate2d_as(sf_f, gt_flow, mode="bilinear")
out_flow = projectSceneFlow2Flow(target_dict['input_k_l1'], sf_f_up, disp_l1)
valid_epe = _elementwise_robust_epe_char(out_flow, gt_flow) * gt_flow_mask
valid_epe[gt_flow_mask == 0].detach_()
flow_l1_loss = valid_epe[gt_flow_mask != 0].mean()
## disp1_next
out_depth_l1 = _disp2depth_kitti_K(disp_l1, target_dict['input_k_l1'][:, 0, 0])
out_depth_l1 = torch.clamp(out_depth_l1, 1e-3, 80)
out_depth_l1_next = out_depth_l1 + sf_f_up[:, 2:3, :, :]
disp_l1_next = _depth2disp_kitti_K(out_depth_l1_next, target_dict['input_k_l1'][:, 0, 0])
valid_abs_rel = torch.abs(gt_disp2 - disp_l1_next) * gt_disp2_mask
valid_abs_rel[gt_disp2_mask == 0].detach_()
disp_l2_loss = valid_abs_rel[gt_disp2_mask != 0].mean()
disp_loss = disp_loss + (disp_l1_loss + disp_l2_loss) * self._weights[ii]
flow_loss = flow_loss + flow_l1_loss * self._weights[ii]
# finding weight
u_loss = unsup_loss.detach()
d_loss = disp_loss.detach()
f_loss = flow_loss.detach()
max_val = torch.max(torch.max(f_loss, d_loss), u_loss)
u_weight = max_val / u_loss
d_weight = max_val / d_loss
f_weight = max_val / f_loss
total_loss = unsup_loss * u_weight + disp_loss * d_weight + flow_loss * f_weight
loss_dict["unsup_loss"] = unsup_loss
loss_dict["dp_loss"] = disp_loss
loss_dict["fl_loss"] = flow_loss
loss_dict["total_loss"] = total_loss
return loss_dict
###############################################
## Eval
###############################################
def eval_module_disp_depth(gt_disp, gt_disp_mask, output_disp, gt_depth, output_depth):
loss_dict = {}
batch_size = gt_disp.size(0)
gt_disp_mask_f = gt_disp_mask.float()
## KITTI disparity metric
d_valid_epe = _elementwise_epe(output_disp, gt_disp) * gt_disp_mask_f
d_outlier_epe = (d_valid_epe > 3).float() * ((d_valid_epe / gt_disp) > 0.05).float() * gt_disp_mask_f
loss_dict["otl"] = (d_outlier_epe.view(batch_size, -1).sum(1)).mean() / 91875.68
loss_dict["otl_img"] = d_outlier_epe
## MonoDepth metric
abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = compute_errors(gt_depth[gt_disp_mask], output_depth[gt_disp_mask])
loss_dict["abs_rel"] = abs_rel
loss_dict["sq_rel"] = sq_rel
loss_dict["rms"] = rms
loss_dict["log_rms"] = log_rms
loss_dict["a1"] = a1
loss_dict["a2"] = a2
loss_dict["a3"] = a3
return loss_dict
class Eval_MonoDepth_Eigen(nn.Module):
def __init__(self):
super(Eval_MonoDepth_Eigen, self).__init__()
def forward(self, output_dict, target_dict):
loss_dict = {}
## Depth Eval
gt_depth = target_dict['target_depth']
out_disp_l1 = interpolate2d_as(output_dict["disp_l1_pp"][0], gt_depth, mode="bilinear") * gt_depth.size(3)
out_depth_l1 = _disp2depth_kitti_K(out_disp_l1, target_dict['input_k_l1'][:, 0, 0])
out_depth_l1 = torch.clamp(out_depth_l1, 1e-3, 80)
gt_depth_mask = (gt_depth > 1e-3) * (gt_depth < 80)
## Compute metrics
abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = compute_errors(gt_depth[gt_depth_mask], out_depth_l1[gt_depth_mask])
output_dict["out_disp_l_pp"] = out_disp_l1
output_dict["out_depth_l_pp"] = out_depth_l1
loss_dict["ab_r"] = abs_rel
loss_dict["sq_r"] = sq_rel
loss_dict["rms"] = rms
loss_dict["log_rms"] = log_rms
loss_dict["a1"] = a1
loss_dict["a2"] = a2
loss_dict["a3"] = a3
return loss_dict
class Eval_SceneFlow_KITTI_Test(nn.Module):
def __init__(self):
super(Eval_SceneFlow_KITTI_Test, self).__init__()
def forward(self, output_dict, target_dict):
loss_dict = {}
##################################################
## Depth 1
##################################################
input_l1 = target_dict['input_l1']
intrinsics = target_dict['input_k_l1']
out_disp_l1 = interpolate2d_as(output_dict["disp_l1_pp"][0], input_l1, mode="bilinear") * input_l1.size(3)
out_depth_l1 = _disp2depth_kitti_K(out_disp_l1, intrinsics[:, 0, 0])
out_depth_l1 = torch.clamp(out_depth_l1, 1e-3, 80)
output_dict["out_disp_l_pp"] = out_disp_l1
##################################################
## Optical Flow Eval
##################################################
out_sceneflow = interpolate2d_as(output_dict['flow_f_pp'][0], input_l1, mode="bilinear")
out_flow = projectSceneFlow2Flow(target_dict['input_k_l1'], out_sceneflow, output_dict["out_disp_l_pp"])
output_dict["out_flow_pp"] = out_flow
##################################################
## Depth 2
##################################################
out_depth_l1_next = out_depth_l1 + out_sceneflow[:, 2:3, :, :]
out_disp_l1_next = _depth2disp_kitti_K(out_depth_l1_next, intrinsics[:, 0, 0])
output_dict["out_disp_l_pp_next"] = out_disp_l1_next
loss_dict['sf'] = (out_disp_l1_next * 0).sum()
return loss_dict
class Eval_SceneFlow_KITTI_Train(nn.Module):
def __init__(self, args):
super(Eval_SceneFlow_KITTI_Train, self).__init__()
def forward(self, output_dict, target_dict):
loss_dict = {}
gt_flow = target_dict['target_flow']
gt_flow_mask = (target_dict['target_flow_mask']==1).float()
gt_disp = target_dict['target_disp']
gt_disp_mask = (target_dict['target_disp_mask']==1).float()
gt_disp2_occ = target_dict['target_disp2_occ']
gt_disp2_mask = (target_dict['target_disp2_mask_occ']==1).float()
gt_sf_mask = gt_flow_mask * gt_disp_mask * gt_disp2_mask
intrinsics = target_dict['input_k_l1']
##################################################
## Depth 1
##################################################
batch_size, _, _, width = gt_disp.size()
out_disp_l1 = interpolate2d_as(output_dict["disp_l1_pp"][0], gt_disp, mode="bilinear") * width
out_depth_l1 = _disp2depth_kitti_K(out_disp_l1, intrinsics[:, 0, 0])
out_depth_l1 = torch.clamp(out_depth_l1, 1e-3, 80)
gt_depth_l1 = _disp2depth_kitti_K(gt_disp, intrinsics[:, 0, 0])
dict_disp0_occ = eval_module_disp_depth(gt_disp, gt_disp_mask.bool(), out_disp_l1, gt_depth_l1, out_depth_l1)
output_dict["out_disp_l_pp"] = out_disp_l1
output_dict["out_depth_l_pp"] = out_depth_l1
d0_outlier_image = dict_disp0_occ['otl_img']
loss_dict["d_abs"] = dict_disp0_occ['abs_rel']
loss_dict["d_sq"] = dict_disp0_occ['sq_rel']
loss_dict["d1"] = dict_disp0_occ['otl']
##################################################
## Optical Flow Eval
##################################################
out_sceneflow = interpolate2d_as(output_dict['flow_f_pp'][0], gt_flow, mode="bilinear")
out_flow = projectSceneFlow2Flow(target_dict['input_k_l1'], out_sceneflow, output_dict["out_disp_l_pp"])
## Flow Eval
valid_epe = _elementwise_epe(out_flow, gt_flow) * gt_flow_mask
loss_dict["f_epe"] = (valid_epe.view(batch_size, -1).sum(1)).mean() / 91875.68
output_dict["out_flow_pp"] = out_flow
flow_gt_mag = torch.norm(target_dict["target_flow"], p=2, dim=1, keepdim=True) + 1e-8
flow_outlier_epe = (valid_epe > 3).float() * ((valid_epe / flow_gt_mag) > 0.05).float() * gt_flow_mask
loss_dict["f1"] = (flow_outlier_epe.view(batch_size, -1).sum(1)).mean() / 91875.68
##################################################
## Depth 2
##################################################
out_depth_l1_next = out_depth_l1 + out_sceneflow[:, 2:3, :, :]
out_disp_l1_next = _depth2disp_kitti_K(out_depth_l1_next, intrinsics[:, 0, 0])
gt_depth_l1_next = _disp2depth_kitti_K(gt_disp2_occ, intrinsics[:, 0, 0])
dict_disp1_occ = eval_module_disp_depth(gt_disp2_occ, gt_disp2_mask.bool(), out_disp_l1_next, gt_depth_l1_next, out_depth_l1_next)
output_dict["out_disp_l_pp_next"] = out_disp_l1_next
output_dict["out_depth_l_pp_next"] = out_depth_l1_next
d1_outlier_image = dict_disp1_occ['otl_img']
loss_dict["d2"] = dict_disp1_occ['otl']
##################################################
## Scene Flow Eval
##################################################
outlier_sf = (flow_outlier_epe.bool() + d0_outlier_image.bool() + d1_outlier_image.bool()).float() * gt_sf_mask
loss_dict["sf"] = (outlier_sf.view(batch_size, -1).sum(1)).mean() / 91873.4
return loss_dict
###############################################
## Ablation - Loss_SceneFlow_SelfSup
###############################################
class Loss_SceneFlow_SelfSup_NoOcc(nn.Module):
def __init__(self, args):
super(Loss_SceneFlow_SelfSup_NoOcc, self).__init__()
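# Hyper-parameters mirror the full self-supervised loss: _weights are per-pyramid-scale
# loss weights (indexed by ii in forward()), _ssim_w mixes SSIM vs. L1 in the photometric
# term, _disp_smooth_w weights disparity smoothness, and _sf_3d_pts / _sf_3d_sm weight the
# 3D point reconstruction and 3D motion smoothness terms. This "NoOcc" ablation only drops
# the disocclusion masking (the commented-out *_occ lines below).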
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._disp_smooth_w = 0.1
self._sf_3d_pts = 0.2
self._sf_3d_sm = 200
def depth_loss_left_img(self, disp_l, disp_r, img_l_aug, img_r_aug, ii):
img_r_warp = _generate_image_left(img_r_aug, disp_l)
# left_occ = _adaptive_disocc_detection_disp(disp_r).detach()
## Photometric loss:
img_diff = (_elementwise_l1(img_l_aug, img_r_warp) * (1.0 - self._ssim_w) + _SSIM(img_l_aug, img_r_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_img = img_diff.mean()
# loss_img = (img_diff[left_occ]).mean()
# img_diff[~left_occ].detach_()
## Disparities smoothness
loss_smooth = _smoothness_motion_2nd(disp_l, img_l_aug, beta=10.0).mean() / (2 ** ii)
return loss_img + self._disp_smooth_w * loss_smooth#, left_occ
def sceneflow_loss(self, sf_f, sf_b, disp_l1, disp_l2, k_l1_aug, k_l2_aug, img_l1_aug, img_l2_aug, aug_size, ii):
## Depth2Pts
_, _, h_dp, w_dp = sf_f.size()
disp_l1 = disp_l1 * w_dp
disp_l2 = disp_l2 * w_dp
## scale
local_scale = torch.zeros_like(aug_size)
local_scale[:, 0] = h_dp
local_scale[:, 1] = w_dp
pts1, k1_scale = pixel2pts_ms(k_l1_aug, disp_l1, local_scale / aug_size)
pts2, k2_scale = pixel2pts_ms(k_l2_aug, disp_l2, local_scale / aug_size)
_, pts1_tf, coord1 = pts2pixel_ms(k1_scale, pts1, sf_f, [h_dp, w_dp])
_, pts2_tf, coord2 = pts2pixel_ms(k2_scale, pts2, sf_b, [h_dp, w_dp])
pts2_warp = reconstructPts(coord1, pts2)
pts1_warp = reconstructPts(coord2, pts1)
flow_f = projectSceneFlow2Flow(k1_scale, sf_f, disp_l1)
flow_b = projectSceneFlow2Flow(k2_scale, sf_b, disp_l2)
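# projectSceneFlow2Flow converts the predicted 3D scene flow (given disparity and camera
# intrinsics) into 2D optical flow; in this ablation it is only computed, since the
# disocclusion masks that would use it remain commented out below.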
# occ_map_b = _adaptive_disocc_detection(flow_f).detach() * disp_occ_l2
# occ_map_f = _adaptive_disocc_detection(flow_b).detach() * disp_occ_l1
## Image reconstruction loss
# img_l2_warp = self.warping_layer_aug(img_l2, flow_f, aug_scale, coords)
# img_l1_warp = self.warping_layer_aug(img_l1, flow_b, aug_scale, coords)
img_l2_warp = reconstructImg(coord1, img_l2_aug)
img_l1_warp = reconstructImg(coord2, img_l1_aug)
img_diff1 = (_elementwise_l1(img_l1_aug, img_l2_warp) * (1.0 - self._ssim_w) + _SSIM(img_l1_aug, img_l2_warp) * self._ssim_w).mean(dim=1, keepdim=True)
img_diff2 = (_elementwise_l1(img_l2_aug, img_l1_warp) * (1.0 - self._ssim_w) + _SSIM(img_l2_aug, img_l1_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_im1 = img_diff1.mean()
loss_im2 = img_diff2.mean()
# loss_im1 = img_diff1[occ_map_f].mean()
# loss_im2 = img_diff2[occ_map_b].mean()
# img_diff1[~occ_map_f].detach_()
# img_diff2[~occ_map_b].detach_()
loss_im = loss_im1 + loss_im2
## Point Reconstruction Loss
pts_norm1 = torch.norm(pts1, p=2, dim=1, keepdim=True)
pts_norm2 = torch.norm(pts2, p=2, dim=1, keepdim=True)
pts_diff1 = _elementwise_epe(pts1_tf, pts2_warp).mean(dim=1, keepdim=True) / (pts_norm1 + 1e-8)
pts_diff2 = _elementwise_epe(pts2_tf, pts1_warp).mean(dim=1, keepdim=True) / (pts_norm2 + 1e-8)
loss_pts1 = pts_diff1.mean()
loss_pts2 = pts_diff2.mean()
# loss_pts1 = pts_diff1[occ_map_f].mean()
# loss_pts2 = pts_diff2[occ_map_b].mean()
# pts_diff1[~occ_map_f].detach_()
# pts_diff2[~occ_map_b].detach_()
loss_pts = loss_pts1 + loss_pts2
## 3D motion smoothness loss
loss_3d_s = ( (_smoothness_motion_2nd(sf_f, img_l1_aug, beta=10.0) / (pts_norm1 + 1e-8)).mean() + (_smoothness_motion_2nd(sf_b, img_l2_aug, beta=10.0) / (pts_norm2 + 1e-8)).mean() ) / (2 ** ii)
## Loss Summation
sceneflow_loss = loss_im + self._sf_3d_pts * loss_pts + self._sf_3d_sm * loss_3d_s
return sceneflow_loss, loss_im, loss_pts, loss_3d_s
def detaching_grad_of_outputs(self, output_dict):
for ii in range(0, len(output_dict['flow_f'])):
output_dict['flow_f'][ii].detach_()
output_dict['flow_b'][ii].detach_()
output_dict['disp_l1'][ii].detach_()
output_dict['disp_l2'][ii].detach_()
return None
def forward(self, output_dict, target_dict):
loss_dict = {}
## SceneFlow Loss
batch_size = target_dict['input_l1'].size(0)
loss_sf_sum = 0
loss_dp_sum = 0
loss_sf_2d = 0
loss_sf_3d = 0
loss_sf_sm = 0
k_l1_aug = target_dict['input_k_l1_aug']
k_l2_aug = target_dict['input_k_l2_aug']
aug_size = target_dict['aug_size']
disp_r1_dict = output_dict['output_dict_r']['disp_l1']
disp_r2_dict = output_dict['output_dict_r']['disp_l2']
for ii, (sf_f, sf_b, disp_l1, disp_l2, disp_r1, disp_r2) in enumerate(zip(output_dict['flow_f'], output_dict['flow_b'], output_dict['disp_l1'], output_dict['disp_l2'], disp_r1_dict, disp_r2_dict)):
assert(sf_f.size()[2:4] == sf_b.size()[2:4])
assert(sf_f.size()[2:4] == disp_l1.size()[2:4])
assert(sf_f.size()[2:4] == disp_l2.size()[2:4])
## For image reconstruction loss
img_l1_aug = interpolate2d_as(target_dict["input_l1_aug"], sf_f)
img_l2_aug = interpolate2d_as(target_dict["input_l2_aug"], sf_b)
img_r1_aug = interpolate2d_as(target_dict["input_r1_aug"], sf_f)
img_r2_aug = interpolate2d_as(target_dict["input_r2_aug"], sf_b)
## Depth Loss
loss_disp_l1 = self.depth_loss_left_img(disp_l1, disp_r1, img_l1_aug, img_r1_aug, ii)
loss_disp_l2 = self.depth_loss_left_img(disp_l2, disp_r2, img_l2_aug, img_r2_aug, ii)
loss_dp_sum = loss_dp_sum + (loss_disp_l1 + loss_disp_l2) * self._weights[ii]
## Sceneflow Loss
loss_sceneflow, loss_im, loss_pts, loss_3d_s = self.sceneflow_loss(sf_f, sf_b,
disp_l1, disp_l2,
k_l1_aug, k_l2_aug,
img_l1_aug, img_l2_aug,
aug_size, ii)
loss_sf_sum = loss_sf_sum + loss_sceneflow * self._weights[ii]
loss_sf_2d = loss_sf_2d + loss_im
loss_sf_3d = loss_sf_3d + loss_pts
loss_sf_sm = loss_sf_sm + loss_3d_s
# finding weight
f_loss = loss_sf_sum.detach()
d_loss = loss_dp_sum.detach()
max_val = torch.max(f_loss, d_loss)
f_weight = max_val / f_loss
d_weight = max_val / d_loss
total_loss = loss_sf_sum * f_weight + loss_dp_sum * d_weight
loss_dict = {}
loss_dict["dp"] = loss_dp_sum
loss_dict["sf"] = loss_sf_sum
loss_dict["s_2"] = loss_sf_2d
loss_dict["s_3"] = loss_sf_3d
loss_dict["s_3s"] = loss_sf_sm
loss_dict["total_loss"] = total_loss
self.detaching_grad_of_outputs(output_dict['output_dict_r'])
return loss_dict
class Loss_SceneFlow_SelfSup_NoPts(nn.Module):
def __init__(self, args):
super(Loss_SceneFlow_SelfSup_NoPts, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._disp_smooth_w = 0.1
self._sf_3d_pts = 0.2
self._sf_3d_sm = 200
def depth_loss_left_img(self, disp_l, disp_r, img_l_aug, img_r_aug, ii):
img_r_warp = _generate_image_left(img_r_aug, disp_l)
left_occ = _adaptive_disocc_detection_disp(disp_r).detach()
## Photometric loss:
img_diff = (_elementwise_l1(img_l_aug, img_r_warp) * (1.0 - self._ssim_w) + _SSIM(img_l_aug, img_r_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_img = (img_diff[left_occ]).mean()
img_diff[~left_occ].detach_()
## Disparities smoothness
loss_smooth = _smoothness_motion_2nd(disp_l, img_l_aug, beta=10.0).mean() / (2 ** ii)
return loss_img + self._disp_smooth_w * loss_smooth, left_occ
def sceneflow_loss(self, sf_f, sf_b, disp_l1, disp_l2, disp_occ_l1, disp_occ_l2, k_l1_aug, k_l2_aug, img_l1_aug, img_l2_aug, aug_size, ii):
## Depth2Pts
_, _, h_dp, w_dp = sf_f.size()
disp_l1 = disp_l1 * w_dp
disp_l2 = disp_l2 * w_dp
## scale
local_scale = torch.zeros_like(aug_size)
local_scale[:, 0] = h_dp
local_scale[:, 1] = w_dp
pts1, k1_scale = pixel2pts_ms(k_l1_aug, disp_l1, local_scale / aug_size)
pts2, k2_scale = pixel2pts_ms(k_l2_aug, disp_l2, local_scale / aug_size)
_, pts1_tf, coord1 = pts2pixel_ms(k1_scale, pts1, sf_f, [h_dp, w_dp])
_, pts2_tf, coord2 = pts2pixel_ms(k2_scale, pts2, sf_b, [h_dp, w_dp])
# pts2_warp = reconstructPts(coord1, pts2)
# pts1_warp = reconstructPts(coord2, pts1)
flow_f = projectSceneFlow2Flow(k1_scale, sf_f, disp_l1)
flow_b = projectSceneFlow2Flow(k2_scale, sf_b, disp_l2)
occ_map_b = _adaptive_disocc_detection(flow_f).detach() * disp_occ_l2
occ_map_f = _adaptive_disocc_detection(flow_b).detach() * disp_occ_l1
## Image reconstruction loss
# img_l2_warp = self.warping_layer_aug(img_l2, flow_f, aug_scale, coords)
# img_l1_warp = self.warping_layer_aug(img_l1, flow_b, aug_scale, coords)
img_l2_warp = reconstructImg(coord1, img_l2_aug)
img_l1_warp = reconstructImg(coord2, img_l1_aug)
img_diff1 = (_elementwise_l1(img_l1_aug, img_l2_warp) * (1.0 - self._ssim_w) + _SSIM(img_l1_aug, img_l2_warp) * self._ssim_w).mean(dim=1, keepdim=True)
img_diff2 = (_elementwise_l1(img_l2_aug, img_l1_warp) * (1.0 - self._ssim_w) + _SSIM(img_l2_aug, img_l1_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_im1 = img_diff1[occ_map_f].mean()
loss_im2 = img_diff2[occ_map_b].mean()
img_diff1[~occ_map_f].detach_()
img_diff2[~occ_map_b].detach_()
loss_im = loss_im1 + loss_im2
# ## Point Reconstruction Loss
pts_norm1 = torch.norm(pts1, p=2, dim=1, keepdim=True)
pts_norm2 = torch.norm(pts2, p=2, dim=1, keepdim=True)
# pts_diff1 = _elementwise_epe(pts1_tf, pts2_warp).mean(dim=1, keepdim=True) / (pts_norm1 + 1e-8)
# pts_diff2 = _elementwise_epe(pts2_tf, pts1_warp).mean(dim=1, keepdim=True) / (pts_norm2 + 1e-8)
# loss_pts1 = pts_diff1[occ_map_f].mean()
# loss_pts2 = pts_diff2[occ_map_b].mean()
# pts_diff1[~occ_map_f].detach_()
# pts_diff2[~occ_map_b].detach_()
# loss_pts = loss_pts1 + loss_pts2
## 3D motion smoothness loss
loss_3d_s = ( (_smoothness_motion_2nd(sf_f, img_l1_aug, beta=10.0) / (pts_norm1 + 1e-8)).mean() + (_smoothness_motion_2nd(sf_b, img_l2_aug, beta=10.0) / (pts_norm2 + 1e-8)).mean() ) / (2 ** ii)
## Loss Summation
sceneflow_loss = loss_im + self._sf_3d_sm * loss_3d_s# + self._sf_3d_pts * loss_pts
return sceneflow_loss, loss_im, loss_3d_s#, loss_pts
def detaching_grad_of_outputs(self, output_dict):
for ii in range(0, len(output_dict['flow_f'])):
output_dict['flow_f'][ii].detach_()
output_dict['flow_b'][ii].detach_()
output_dict['disp_l1'][ii].detach_()
output_dict['disp_l2'][ii].detach_()
return None
def forward(self, output_dict, target_dict):
loss_dict = {}
## SceneFlow Loss
batch_size = target_dict['input_l1'].size(0)
loss_sf_sum = 0
loss_dp_sum = 0
loss_sf_2d = 0
# loss_sf_3d = 0
loss_sf_sm = 0
k_l1_aug = target_dict['input_k_l1_aug']
k_l2_aug = target_dict['input_k_l2_aug']
aug_size = target_dict['aug_size']
disp_r1_dict = output_dict['output_dict_r']['disp_l1']
disp_r2_dict = output_dict['output_dict_r']['disp_l2']
for ii, (sf_f, sf_b, disp_l1, disp_l2, disp_r1, disp_r2) in enumerate(zip(output_dict['flow_f'], output_dict['flow_b'], output_dict['disp_l1'], output_dict['disp_l2'], disp_r1_dict, disp_r2_dict)):
assert(sf_f.size()[2:4] == sf_b.size()[2:4])
assert(sf_f.size()[2:4] == disp_l1.size()[2:4])
assert(sf_f.size()[2:4] == disp_l2.size()[2:4])
## For image reconstruction loss
img_l1_aug = interpolate2d_as(target_dict["input_l1_aug"], sf_f)
img_l2_aug = interpolate2d_as(target_dict["input_l2_aug"], sf_b)
img_r1_aug = interpolate2d_as(target_dict["input_r1_aug"], sf_f)
img_r2_aug = interpolate2d_as(target_dict["input_r2_aug"], sf_b)
## Depth Loss
loss_disp_l1, disp_occ_l1 = self.depth_loss_left_img(disp_l1, disp_r1, img_l1_aug, img_r1_aug, ii)
loss_disp_l2, disp_occ_l2 = self.depth_loss_left_img(disp_l2, disp_r2, img_l2_aug, img_r2_aug, ii)
loss_dp_sum = loss_dp_sum + (loss_disp_l1 + loss_disp_l2) * self._weights[ii]
## Sceneflow Loss
loss_sceneflow, loss_im, loss_3d_s = self.sceneflow_loss(sf_f, sf_b,
disp_l1, disp_l2,
disp_occ_l1, disp_occ_l2,
k_l1_aug, k_l2_aug,
img_l1_aug, img_l2_aug,
aug_size, ii)
loss_sf_sum = loss_sf_sum + loss_sceneflow * self._weights[ii]
loss_sf_2d = loss_sf_2d + loss_im
# loss_sf_3d = loss_sf_3d + loss_pts
loss_sf_sm = loss_sf_sm + loss_3d_s
# finding weight
f_loss = loss_sf_sum.detach()
d_loss = loss_dp_sum.detach()
max_val = torch.max(f_loss, d_loss)
f_weight = max_val / f_loss
d_weight = max_val / d_loss
total_loss = loss_sf_sum * f_weight + loss_dp_sum * d_weight
loss_dict = {}
loss_dict["dp"] = loss_dp_sum
loss_dict["sf"] = loss_sf_sum
loss_dict["s_2"] = loss_sf_2d
# loss_dict["s_3"] = loss_sf_3d
loss_dict["s_3s"] = loss_sf_sm
loss_dict["total_loss"] = total_loss
self.detaching_grad_of_outputs(output_dict['output_dict_r'])
return loss_dict
class Loss_SceneFlow_SelfSup_NoPtsNoOcc(nn.Module):
def __init__(self, args):
super(Loss_SceneFlow_SelfSup_NoPtsNoOcc, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._disp_smooth_w = 0.1
self._sf_3d_pts = 0.2
self._sf_3d_sm = 200
def depth_loss_left_img(self, disp_l, disp_r, img_l_aug, img_r_aug, ii):
img_r_warp = _generate_image_left(img_r_aug, disp_l)
# left_occ = _adaptive_disocc_detection_disp(disp_r).detach()
## Photometric loss:
img_diff = (_elementwise_l1(img_l_aug, img_r_warp) * (1.0 - self._ssim_w) + _SSIM(img_l_aug, img_r_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_img = img_diff.mean()
# loss_img = (img_diff[left_occ]).mean()
# img_diff[~left_occ].detach_()
## Disparities smoothness
loss_smooth = _smoothness_motion_2nd(disp_l, img_l_aug, beta=10.0).mean() / (2 ** ii)
return loss_img + self._disp_smooth_w * loss_smooth#, left_occ
def sceneflow_loss(self, sf_f, sf_b, disp_l1, disp_l2, k_l1_aug, k_l2_aug, img_l1_aug, img_l2_aug, aug_size, ii):
## Depth2Pts
_, _, h_dp, w_dp = sf_f.size()
disp_l1 = disp_l1 * w_dp
disp_l2 = disp_l2 * w_dp
## scale
local_scale = torch.zeros_like(aug_size)
local_scale[:, 0] = h_dp
local_scale[:, 1] = w_dp
pts1, k1_scale = pixel2pts_ms(k_l1_aug, disp_l1, local_scale / aug_size)
pts2, k2_scale = pixel2pts_ms(k_l2_aug, disp_l2, local_scale / aug_size)
_, pts1_tf, coord1 = pts2pixel_ms(k1_scale, pts1, sf_f, [h_dp, w_dp])
_, pts2_tf, coord2 = pts2pixel_ms(k2_scale, pts2, sf_b, [h_dp, w_dp])
# pts2_warp = reconstructPts(coord1, pts2)
# pts1_warp = reconstructPts(coord2, pts1)
flow_f = projectSceneFlow2Flow(k1_scale, sf_f, disp_l1)
flow_b = projectSceneFlow2Flow(k2_scale, sf_b, disp_l2)
# occ_map_b = _adaptive_disocc_detection(flow_f).detach() * disp_occ_l2
# occ_map_f = _adaptive_disocc_detection(flow_b).detach() * disp_occ_l1
## Image reconstruction loss
# img_l2_warp = self.warping_layer_aug(img_l2, flow_f, aug_scale, coords)
# img_l1_warp = self.warping_layer_aug(img_l1, flow_b, aug_scale, coords)
img_l2_warp = reconstructImg(coord1, img_l2_aug)
img_l1_warp = reconstructImg(coord2, img_l1_aug)
img_diff1 = (_elementwise_l1(img_l1_aug, img_l2_warp) * (1.0 - self._ssim_w) + _SSIM(img_l1_aug, img_l2_warp) * self._ssim_w).mean(dim=1, keepdim=True)
img_diff2 = (_elementwise_l1(img_l2_aug, img_l1_warp) * (1.0 - self._ssim_w) + _SSIM(img_l2_aug, img_l1_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_im1 = img_diff1.mean()
loss_im2 = img_diff2.mean()
# loss_im1 = img_diff1[occ_map_f].mean()
# loss_im2 = img_diff2[occ_map_b].mean()
# img_diff1[~occ_map_f].detach_()
# img_diff2[~occ_map_b].detach_()
loss_im = loss_im1 + loss_im2
## Point Reconstruction Loss
pts_norm1 = torch.norm(pts1, p=2, dim=1, keepdim=True)
pts_norm2 = torch.norm(pts2, p=2, dim=1, keepdim=True)
# pts_diff1 = _elementwise_epe(pts1_tf, pts2_warp).mean(dim=1, keepdim=True) / (pts_norm1 + 1e-8)
# pts_diff2 = _elementwise_epe(pts2_tf, pts1_warp).mean(dim=1, keepdim=True) / (pts_norm2 + 1e-8)
# loss_pts1 = pts_diff1.mean()
# loss_pts2 = pts_diff2.mean()
# loss_pts1 = pts_diff1[occ_map_f].mean()
# loss_pts2 = pts_diff2[occ_map_b].mean()
# pts_diff1[~occ_map_f].detach_()
# pts_diff2[~occ_map_b].detach_()
# loss_pts = loss_pts1 + loss_pts2
## 3D motion smoothness loss
loss_3d_s = ( (_smoothness_motion_2nd(sf_f, img_l1_aug, beta=10.0) / (pts_norm1 + 1e-8)).mean() + (_smoothness_motion_2nd(sf_b, img_l2_aug, beta=10.0) / (pts_norm2 + 1e-8)).mean() ) / (2 ** ii)
## Loss Summation
sceneflow_loss = loss_im + self._sf_3d_sm * loss_3d_s # + self._sf_3d_pts * loss_pts
return sceneflow_loss, loss_im, loss_3d_s # , loss_pts
def detaching_grad_of_outputs(self, output_dict):
for ii in range(0, len(output_dict['flow_f'])):
output_dict['flow_f'][ii].detach_()
output_dict['flow_b'][ii].detach_()
output_dict['disp_l1'][ii].detach_()
output_dict['disp_l2'][ii].detach_()
return None
def forward(self, output_dict, target_dict):
loss_dict = {}
## SceneFlow Loss
batch_size = target_dict['input_l1'].size(0)
loss_sf_sum = 0
loss_dp_sum = 0
loss_sf_2d = 0
# loss_sf_3d = 0
loss_sf_sm = 0
k_l1_aug = target_dict['input_k_l1_aug']
k_l2_aug = target_dict['input_k_l2_aug']
aug_size = target_dict['aug_size']
disp_r1_dict = output_dict['output_dict_r']['disp_l1']
disp_r2_dict = output_dict['output_dict_r']['disp_l2']
for ii, (sf_f, sf_b, disp_l1, disp_l2, disp_r1, disp_r2) in enumerate(zip(output_dict['flow_f'], output_dict['flow_b'], output_dict['disp_l1'], output_dict['disp_l2'], disp_r1_dict, disp_r2_dict)):
assert(sf_f.size()[2:4] == sf_b.size()[2:4])
assert(sf_f.size()[2:4] == disp_l1.size()[2:4])
assert(sf_f.size()[2:4] == disp_l2.size()[2:4])
## For image reconstruction loss
img_l1_aug = interpolate2d_as(target_dict["input_l1_aug"], sf_f)
img_l2_aug = interpolate2d_as(target_dict["input_l2_aug"], sf_b)
img_r1_aug = interpolate2d_as(target_dict["input_r1_aug"], sf_f)
img_r2_aug = interpolate2d_as(target_dict["input_r2_aug"], sf_b)
## Depth Loss
loss_disp_l1 = self.depth_loss_left_img(disp_l1, disp_r1, img_l1_aug, img_r1_aug, ii)
loss_disp_l2 = self.depth_loss_left_img(disp_l2, disp_r2, img_l2_aug, img_r2_aug, ii)
loss_dp_sum = loss_dp_sum + (loss_disp_l1 + loss_disp_l2) * self._weights[ii]
## Sceneflow Loss
loss_sceneflow, loss_im, loss_3d_s = self.sceneflow_loss(sf_f, sf_b,
disp_l1, disp_l2,
k_l1_aug, k_l2_aug,
img_l1_aug, img_l2_aug,
aug_size, ii)
loss_sf_sum = loss_sf_sum + loss_sceneflow * self._weights[ii]
loss_sf_2d = loss_sf_2d + loss_im
# loss_sf_3d = loss_sf_3d + loss_pts
loss_sf_sm = loss_sf_sm + loss_3d_s
# finding weight
f_loss = loss_sf_sum.detach()
d_loss = loss_dp_sum.detach()
max_val = torch.max(f_loss, d_loss)
f_weight = max_val / f_loss
d_weight = max_val / d_loss
total_loss = loss_sf_sum * f_weight + loss_dp_sum * d_weight
loss_dict = {}
loss_dict["dp"] = loss_dp_sum
loss_dict["sf"] = loss_sf_sum
loss_dict["s_2"] = loss_sf_2d
# loss_dict["s_3"] = loss_sf_3d
loss_dict["s_3s"] = loss_sf_sm
loss_dict["total_loss"] = total_loss
self.detaching_grad_of_outputs(output_dict['output_dict_r'])
return loss_dict
###############################################
## Ablation - Separate Decoder
###############################################
class Loss_Flow_Only(nn.Module):
def __init__(self):
super(Loss_Flow_Only, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._warping_layer = WarpingLayer_Flow()
def forward(self, output_dict, target_dict):
## Loss
total_loss = 0
loss_sf_2d = 0
loss_sf_sm = 0
for ii, (sf_f, sf_b) in enumerate(zip(output_dict['flow_f'], output_dict['flow_b'])):
## Depth2Pts
img_l1 = interpolate2d_as(target_dict["input_l1_aug"], sf_f)
img_l2 = interpolate2d_as(target_dict["input_l2_aug"], sf_b)
img_l2_warp = self._warping_layer(img_l2, sf_f)
img_l1_warp = self._warping_layer(img_l1, sf_b)
occ_map_f = _adaptive_disocc_detection(sf_b).detach()
occ_map_b = _adaptive_disocc_detection(sf_f).detach()
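# Disocclusion masks estimated from the opposite flow direction; the photometric losses
# below are evaluated only on these (non-disoccluded) pixels.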
img_diff1 = (_elementwise_l1(img_l1, img_l2_warp) * (1.0 - self._ssim_w) + _SSIM(img_l1, img_l2_warp) * self._ssim_w).mean(dim=1, keepdim=True)
img_diff2 = (_elementwise_l1(img_l2, img_l1_warp) * (1.0 - self._ssim_w) + _SSIM(img_l2, img_l1_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_im1 = img_diff1[occ_map_f].mean()
loss_im2 = img_diff2[occ_map_b].mean()
img_diff1[~occ_map_f].detach_()
img_diff2[~occ_map_b].detach_()
loss_im = loss_im1 + loss_im2
loss_smooth = _smoothness_motion_2nd(sf_f / 20.0, img_l1, beta=10.0).mean() + _smoothness_motion_2nd(sf_b / 20.0, img_l2, beta=10.0).mean()
total_loss = total_loss + (loss_im + 10.0 * loss_smooth) * self._weights[ii]
loss_sf_2d = loss_sf_2d + loss_im
loss_sf_sm = loss_sf_sm + loss_smooth
loss_dict = {}
loss_dict["ofd2"] = loss_sf_2d
loss_dict["ofs2"] = loss_sf_sm
loss_dict["total_loss"] = total_loss
return loss_dict
class Eval_Flow_Only(nn.Module):
def __init__(self):
super(Eval_Flow_Only, self).__init__()
def upsample_flow_as(self, flow, output_as):
size_inputs = flow.size()[2:4]
size_targets = output_as.size()[2:4]
resized_flow = tf.interpolate(flow, size=size_targets, mode="bilinear", align_corners=True)
# correct scaling of flow
u, v = resized_flow.chunk(2, dim=1)
u *= float(size_targets[1] / size_inputs[1])
v *= float(size_targets[0] / size_inputs[0])
return torch.cat([u, v], dim=1)
def forward(self, output_dict, target_dict):
loss_dict = {}
im_l1 = target_dict['input_l1']
batch_size, _, _, _ = im_l1.size()
gt_flow = target_dict['target_flow']
gt_flow_mask = target_dict['target_flow_mask']
## Flow EPE
out_flow = self.upsample_flow_as(output_dict['flow_f'][0], gt_flow)
valid_epe = _elementwise_epe(out_flow, gt_flow) * gt_flow_mask.float()
loss_dict["epe"] = (valid_epe.view(batch_size, -1).sum(1)).mean() / 91875.68
flow_gt_mag = torch.norm(target_dict["target_flow"], p=2, dim=1, keepdim=True) + 1e-8
outlier_epe = (valid_epe > 3).float() * ((valid_epe / flow_gt_mag) > 0.05).float() * gt_flow_mask
loss_dict["f1"] = (outlier_epe.view(batch_size, -1).sum(1)).mean() / 91875.68
output_dict["out_flow_pp"] = out_flow
return loss_dict
class Loss_Disp_Only(nn.Module):
def __init__(self, args):
super(Loss_Disp_Only, self).__init__()
self._weights = [4.0, 2.0, 1.0, 1.0, 1.0]
self._ssim_w = 0.85
self._disp_smooth_w = 0.1
def depth_loss_left_img(self, disp_l, disp_r, img_l_aug, img_r_aug, ii):
img_r_warp = _generate_image_left(img_r_aug, disp_l)
left_occ = _adaptive_disocc_detection_disp(disp_r).detach()
## Image loss:
img_diff = (_elementwise_l1(img_l_aug, img_r_warp) * (1.0 - self._ssim_w) + _SSIM(img_l_aug, img_r_warp) * self._ssim_w).mean(dim=1, keepdim=True)
loss_img = (img_diff[left_occ]).mean()
img_diff[~left_occ].detach_()
## Disparities smoothness
loss_smooth = _smoothness_motion_2nd(disp_l, img_l_aug, beta=10.0).mean() / (2 ** ii)
return loss_img + self._disp_smooth_w * loss_smooth, left_occ
def detaching_grad_of_outputs(self, output_dict):
for ii in range(0, len(output_dict['disp_l1'])):
output_dict['disp_l1'][ii].detach_()
output_dict['disp_l2'][ii].detach_()
return None
def forward(self, output_dict, target_dict):
loss_dict = {}
## SceneFlow Loss
batch_size = target_dict['input_l1'].size(0)
loss_dp_sum = 0
k_l1_aug = target_dict['input_k_l1_aug']
k_l2_aug = target_dict['input_k_l2_aug']
aug_size = target_dict['aug_size']
disp_r1_dict = output_dict['output_dict_r']['disp_l1']
disp_r2_dict = output_dict['output_dict_r']['disp_l2']
for ii, (disp_l1, disp_l2, disp_r1, disp_r2) in enumerate(zip(output_dict['disp_l1'], output_dict['disp_l2'], disp_r1_dict, disp_r2_dict)):
assert(disp_l1.size()[2:4] == disp_l2.size()[2:4])
## For image reconstruction loss
img_l1_aug = interpolate2d_as(target_dict["input_l1_aug"], disp_l1)
img_l2_aug = interpolate2d_as(target_dict["input_l2_aug"], disp_l2)
img_r1_aug = interpolate2d_as(target_dict["input_r1_aug"], disp_l1)
img_r2_aug = interpolate2d_as(target_dict["input_r2_aug"], disp_l2)
## Depth Loss
loss_disp_l1, _ = self.depth_loss_left_img(disp_l1, disp_r1, img_l1_aug, img_r1_aug, ii)
loss_disp_l2, _ = self.depth_loss_left_img(disp_l2, disp_r2, img_l2_aug, img_r2_aug, ii)
loss_dp_sum = loss_dp_sum + (loss_disp_l1 + loss_disp_l2) * self._weights[ii]
total_loss = loss_dp_sum
loss_dict = {}
loss_dict["dp"] = loss_dp_sum
loss_dict["total_loss"] = total_loss
self.detaching_grad_of_outputs(output_dict['output_dict_r'])
return loss_dict
class Eval_Disp_Only(nn.Module):
def __init__(self):
super(Eval_Disp_Only, self).__init__()
def forward(self, output_dict, target_dict):
loss_dict = {}
## Depth Eval
gt_disp = target_dict['target_disp']
gt_disp_mask = (target_dict['target_disp_mask']==1)
intrinsics = target_dict['input_k_l1']
out_disp_l1 = interpolate2d_as(output_dict["disp_l1_pp"][0], gt_disp, mode="bilinear") * gt_disp.size(3)
out_depth_l1 = _disp2depth_kitti_K(out_disp_l1, intrinsics[:, 0, 0])
out_depth_l1 = torch.clamp(out_depth_l1, 1e-3, 80)
gt_depth_pp = _disp2depth_kitti_K(gt_disp, intrinsics[:, 0, 0])
output_dict_displ = eval_module_disp_depth(gt_disp, gt_disp_mask, out_disp_l1, gt_depth_pp, out_depth_l1)
output_dict["out_disp_l_pp"] = out_disp_l1
output_dict["out_depth_l_pp"] = out_depth_l1
loss_dict["d1"] = output_dict_displ['otl']
loss_dict["ab"] = output_dict_displ['abs_rel']
loss_dict["sq"] = output_dict_displ['sq_rel']
loss_dict["rms"] = output_dict_displ['rms']
loss_dict["lrms"] = output_dict_displ['log_rms']
loss_dict["a1"] = output_dict_displ['a1']
loss_dict["a2"] = output_dict_displ['a2']
loss_dict["a3"] = output_dict_displ['a3']
return loss_dict
###############################################
## MonoDepth Experiment
###############################################
class Basis_MonoDepthLoss(nn.Module):
def __init__(self):
super(Basis_MonoDepthLoss, self).__init__()
self.ssim_w = 0.85
self.disp_gradient_w = 0.1
self.lr_w = 1.0
self.n = 4
def scale_pyramid(self, img_input, depths):
scaled_imgs = []
for _, depth in enumerate(depths):
scaled_imgs.append(interpolate2d_as(img_input, depth))
return scaled_imgs
def gradient_x(self, img):
# Pad input to keep output size consistent
img = tf.pad(img, (0, 1, 0, 0), mode="replicate")
gx = img[:, :, :, :-1] - img[:, :, :, 1:] # NCHW
return gx
def gradient_y(self, img):
# Pad input to keep output size consistent
img = tf.pad(img, (0, 0, 0, 1), mode="replicate")
gy = img[:, :, :-1, :] - img[:, :, 1:, :] # NCHW
return gy
def apply_disparity(self, img, disp):
batch_size, _, height, width = img.size()
# Original coordinates of pixels
x_base = torch.linspace(0, 1, width).repeat(batch_size, height, 1).type_as(img)
y_base = torch.linspace(0, 1, height).repeat(batch_size, width, 1).transpose(1, 2).type_as(img)
# Apply shift in X direction
x_shifts = disp[:, 0, :, :] # Disparity is passed in NCHW format with 1 channel
flow_field = torch.stack((x_base + x_shifts, y_base), dim=3)
# In grid_sample coordinates are assumed to be between -1 and 1
output = tf.grid_sample(img, 2 * flow_field - 1, mode='bilinear', padding_mode='zeros')
return output
def generate_image_left(self, img, disp):
return self.apply_disparity(img, -disp)
def generate_image_right(self, img, disp):
return self.apply_disparity(img, disp)
def SSIM(self, x, y):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
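# Standard SSIM with a 3x3 average-pooling window as the local statistic; the result is
# converted to a per-pixel dissimilarity in [0, 1] and zero-padded back to the input size.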
mu_x = nn.AvgPool2d(3, 1)(x)
mu_y = nn.AvgPool2d(3, 1)(y)
mu_x_mu_y = mu_x * mu_y
mu_x_sq = mu_x.pow(2)
mu_y_sq = mu_y.pow(2)
sigma_x = nn.AvgPool2d(3, 1)(x * x) - mu_x_sq
sigma_y = nn.AvgPool2d(3, 1)(y * y) - mu_y_sq
sigma_xy = nn.AvgPool2d(3, 1)(x * y) - mu_x_mu_y
SSIM_n = (2 * mu_x_mu_y + C1) * (2 * sigma_xy + C2)
SSIM_d = (mu_x_sq + mu_y_sq + C1) * (sigma_x + sigma_y + C2)
SSIM = SSIM_n / SSIM_d
SSIM_img = torch.clamp((1 - SSIM) / 2, 0, 1)
return tf.pad(SSIM_img, pad=(1,1,1,1), mode='constant', value=0)
def disp_smoothness(self, disp, pyramid):
disp_gradients_x = [self.gradient_x(d) for d in disp]
disp_gradients_y = [self.gradient_y(d) for d in disp]
image_gradients_x = [self.gradient_x(img) for img in pyramid]
image_gradients_y = [self.gradient_y(img) for img in pyramid]
weights_x = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in image_gradients_x]
weights_y = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in image_gradients_y]
smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(self.n)]
smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(self.n)]
return [torch.abs(smoothness_x[i]) + torch.abs(smoothness_y[i]) for i in range(self.n)]
def forward(self, disp_l, disp_r, img_l, img_r):
self.n = len(disp_l)
## Image pyramid
img_l_pyramid = self.scale_pyramid(img_l, disp_l)
img_r_pyramid = self.scale_pyramid(img_r, disp_r)
## Disocc map
right_occ = [_adaptive_disocc_detection_disp(-disp_l[i]) for i in range(self.n)]
left_occ = [_adaptive_disocc_detection_disp(disp_r[i]) for i in range(self.n)]
## Image reconstruction loss
left_est = [self.generate_image_left(img_r_pyramid[i], disp_l[i]) for i in range(self.n)]
right_est = [self.generate_image_right(img_l_pyramid[i], disp_r[i]) for i in range(self.n)]
# L1
l1_left = [torch.mean((torch.abs(left_est[i] - img_l_pyramid[i])).mean(dim=1, keepdim=True)[left_occ[i]]) for i in range(self.n)]
l1_right = [torch.mean((torch.abs(right_est[i] - img_r_pyramid[i])).mean(dim=1, keepdim=True)[right_occ[i]]) for i in range(self.n)]
# SSIM
ssim_left = [torch.mean((self.SSIM(left_est[i], img_l_pyramid[i])).mean(dim=1, keepdim=True)[left_occ[i]]) for i in range(self.n)]
ssim_right = [torch.mean((self.SSIM(right_est[i], img_r_pyramid[i])).mean(dim=1, keepdim=True)[right_occ[i]]) for i in range(self.n)]
image_loss_left = [self.ssim_w * ssim_left[i] + (1 - self.ssim_w) * l1_left[i] for i in range(self.n)]
image_loss_right = [self.ssim_w * ssim_right[i] + (1 - self.ssim_w) * l1_right[i] for i in range(self.n)]
image_loss = sum(image_loss_left + image_loss_right)
## L-R Consistency loss
right_left_disp = [self.generate_image_left(disp_r[i], disp_l[i]) for i in range(self.n)]
left_right_disp = [self.generate_image_right(disp_l[i], disp_r[i]) for i in range(self.n)]
lr_left_loss = [torch.mean((torch.abs(right_left_disp[i] - disp_l[i]))[left_occ[i]]) for i in range(self.n)]
lr_right_loss = [torch.mean((torch.abs(left_right_disp[i] - disp_r[i]))[right_occ[i]]) for i in range(self.n)]
lr_loss = sum(lr_left_loss + lr_right_loss)
## Disparities smoothness
disp_left_smoothness = self.disp_smoothness(disp_l, img_l_pyramid)
disp_right_smoothness = self.disp_smoothness(disp_r, img_r_pyramid)
disp_left_loss = [torch.mean(torch.abs(disp_left_smoothness[i])) / 2 ** i for i in range(self.n)]
disp_right_loss = [torch.mean(torch.abs(disp_right_smoothness[i])) / 2 ** i for i in range(self.n)]
disp_gradient_loss = sum(disp_left_loss + disp_right_loss)
## Loss sum
loss = image_loss + self.disp_gradient_w * disp_gradient_loss + self.lr_w * lr_loss
return loss
class Loss_MonoDepth(nn.Module):
def __init__(self):
super(Loss_MonoDepth, self).__init__()
self._depth_loss = Basis_MonoDepthLoss()
def forward(self, output_dict, target_dict):
loss_dict = {}
depth_loss = self._depth_loss(output_dict['disp_l1'], output_dict['disp_r1'], target_dict['input_l1'], target_dict['input_r1'])
loss_dict['total_loss'] = depth_loss
return loss_dict
class Eval_MonoDepth(nn.Module):
def __init__(self):
super(Eval_MonoDepth, self).__init__()
def forward(self, output_dict, target_dict):
loss_dict = {}
## Depth Eval
gt_disp = target_dict['target_disp']
gt_disp_mask = (target_dict['target_disp_mask']==1)
intrinsics = target_dict['input_k_l1_orig']
out_disp_l_pp = interpolate2d_as(output_dict["disp_l1_pp"][0], gt_disp, mode="bilinear") * gt_disp.size(3)
out_depth_l_pp = _disp2depth_kitti_K(out_disp_l_pp, intrinsics[:, 0, 0])
out_depth_l_pp = torch.clamp(out_depth_l_pp, 1e-3, 80)
gt_depth_pp = _disp2depth_kitti_K(gt_disp, intrinsics[:, 0, 0])
output_dict_displ = eval_module_disp_depth(gt_disp, gt_disp_mask, out_disp_l_pp, gt_depth_pp, out_depth_l_pp)
output_dict["out_disp_l_pp"] = out_disp_l_pp
output_dict["out_depth_l_pp"] = out_depth_l_pp
loss_dict["ab_r"] = output_dict_displ['abs_rel']
loss_dict["sq_r"] = output_dict_displ['sq_rel']
return loss_dict
###############################################
|
5cfdaf1b9075960ad7b9d05b07fd5b52dbcb85dc
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/mmseg/datasets/night_driving.py
|
3ead91ec77cbd8e3f0a870dee3462549183e9c9b
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
night_driving.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .cityscapes import CityscapesDataset
@DATASETS.register_module()
class NightDrivingDataset(CityscapesDataset):
"""NightDrivingDataset dataset."""
def __init__(self,
img_suffix='_leftImg8bit.png',
seg_map_suffix='_gtCoarse_labelTrainIds.png',
**kwargs) -> None:
super().__init__(
img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
|
aebcb716e8abdd34cd7f37d0751148b9204451ad
|
6855e57c80b2ae8563c9aeb7837b722966527169
|
/PyBioMed/PyGetMol/Getmol.py
|
c66b335ae9feb0c9e981e4d7874acdea28f42989
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
gadsbyfly/PyBioMed
|
613612ac9dac02676eab25fc3609a3ba376939ee
|
45440d8a70b2aa2818762ceadb499dd3a1df90bc
|
refs/heads/master
| 2023-04-14T11:42:12.406985
| 2023-04-10T11:46:54
| 2023-04-10T11:46:54
| 115,875,908
| 112
| 61
|
BSD-3-Clause
| 2022-09-07T13:27:35
| 2017-12-31T17:11:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,259
|
py
|
Getmol.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
This module is to get different formats of molecules from file and web. If you
have any question please contact me via email.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com
"""
try:
# Python 3
from urllib.request import urlopen
except ImportError:
# Python 2
from urllib2 import urlopen
# Core Library modules
import os
import re
import string
# Third party modules
from rdkit import Chem
Version = 1.0
def ReadMolFromSDF(filename=""):
"""
Read a set of molecules from an SDF file.
Note: the output of this function is a collection of molecule objects;
iterate over it with a for statement to access each molecule.
Usage:
res=ReadMolFromSDF(filename)
Input: filename is a file name with path.
Output: res is a collection of molecule objects.
"""
molset = Chem.SDMolSupplier(filename)
return molset
def ReadMolFromMOL(filename=""):
"""
Read a molecule by mol file format.
Usage:
res=ReadMolFromMOL(filename)
Input: filename is a file name with path.
Output: res is a molecular object.
"""
mol = Chem.MolFromMolFile(filename)
return mol
def ReadMolFromSmile(smi=""):
"""
#################################################################
Read a molecule by SMILES string.
Usage:
res=ReadMolFromSmile(smi)
Input: smi is a SMILES string.
Output: res is a molecule object.
#################################################################
"""
mol = Chem.MolFromSmiles(smi.strip())
return mol
def ReadMolFromInchi(inchi=""):
"""
#################################################################
Read a molecule by Inchi string.
Usage:
res=ReadMolFromInchi(inchi)
Input: inchi is a InChi string.
Output: res is a molecule object.
#################################################################
"""
from openbabel import pybel
temp = pybel.readstring("inchi", inchi)
smi = temp.write("smi")
mol = Chem.MolFromSmiles(smi.strip())
return mol
def ReadMolFromMol(filename=""):
"""
#################################################################
Read a molecule with mol file format.
Usage:
res=ReadMolFromMol(filename)
Input: filename is a file name.
Output: res is a molecule object.
#################################################################
"""
mol = Chem.MolFromMolFile(filename)
return mol
#############################################################################
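# The helpers below fetch structures from external web services (ChemNet, PubChem,
# DrugBank, KEGG) and convert them to SMILES; they require a network connection and,
# for GetMolFromCAS and ReadMolFromInchi, the openbabel/pybel package.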
def GetMolFromCAS(casid=""):
"""
Download a molecule from http://www.chemnet.com/cas/ by CAS ID (casid).
To use this function, the openbabel/pybel package must be installed.
"""
from openbabel import pybel
casid = casid.strip()
localfile = urlopen(
"http://www.chemnet.com/cas/supplier.cgi?terms=" + casid + "&l=&exact=dict"
)
temp = [line.decode("utf-8", "ignore") if isinstance(line, bytes) else line for line in localfile.readlines()]  # urlopen yields bytes on Python 3
for i in temp:
if re.findall("InChI=", i) == ["InChI="]:
k = i.split(' <td align="left">')
kk = k[1].split("</td>\r\n")
if kk[0][0:5] == "InChI":
res = kk[0]
else:
res = "None"
localfile.close()
mol = pybel.readstring("inchi", res.strip())
smile = mol.write("smi")
return smile.strip()
def GetMolFromEBI():
"""
"""
pass
def GetMolFromNCBI(cid=""):
"""
Downloading the molecules from http://pubchem.ncbi.nlm.nih.gov/ by cid (cid).
"""
cid = cid.strip()
localfile = urlopen(
"http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?cid="
+ cid
+ "&disopt=SaveSDF"
)
temp = localfile.readlines()
f = file("temp.sdf", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.sdf")
os.remove("temp.sdf")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
def GetMolFromDrugbank(dbid=""):
"""
Downloading the molecules from http://www.drugbank.ca/ by dbid (dbid).
"""
dbid = dbid.strip()
localfile = urlopen("http://www.drugbank.ca/drugs/" + dbid + ".sdf")
temp = localfile.readlines()
f = file("temp.sdf", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.sdf")
os.remove("temp.sdf")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
def GetMolFromKegg(kid=""):
"""
Downloading the molecules from http://www.genome.jp/ by kegg id (kid).
"""
ID = str(kid)
localfile = urlopen("http://www.genome.jp/dbget-bin/www_bget?-f+m+drug+" + ID)
temp = localfile.readlines()
f = file("temp.mol", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.mol")
os.remove("temp.mol")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
#############################################################################
if __name__ == "__main__":
print("-" * 10 + "START" + "-" * 10)
print("Only PyBioMed is successfully installed the code below can be run!")
from PyBioMed.PyGetMol.GetProtein import timelimited
@timelimited(10)
def run_GetMolFromCAS():
temp = GetMolFromCAS(casid="50-12-4")
print(temp)
@timelimited(10)
def run_GetMolFromNCBI():
temp = GetMolFromNCBI(cid="2244")
print(temp)
@timelimited(10)
def run_GetMolFromDrugbank():
temp = GetMolFromDrugbank(dbid="DB00133")
print(temp)
@timelimited(10)
def run_GetMolFromKegg():
temp = GetMolFromKegg(kid="D02176")
print(temp)
run_GetMolFromCAS()
print("-" * 25)
run_GetMolFromNCBI()
print("-" * 25)
run_GetMolFromDrugbank()
print("-" * 25)
run_GetMolFromKegg()
print("-" * 10 + "END" + "-" * 10)
|
e3b7b38ba8d5ebce09cd03694a5e5069f392542c
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/api_tests/nodes/views/test_node_contributors_and_group_members_list.py
|
5c31326ff2c2cc857f8f8668079385df9644f499
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
test_node_contributors_and_group_members_list.py
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
ProjectFactory,
OSFGroupFactory,
AuthUserFactory,
)
from osf.utils.permissions import READ, WRITE
@pytest.fixture()
def non_contributor():
return AuthUserFactory()
@pytest.fixture()
def admin_contributor():
return AuthUserFactory()
@pytest.fixture()
def write_contributor():
return AuthUserFactory()
@pytest.fixture()
def group_manager():
user = AuthUserFactory()
user.given_name = 'Dawn'
user.save()
return user
@pytest.fixture()
def group_member():
return AuthUserFactory()
@pytest.fixture()
def group_member_and_contributor():
return AuthUserFactory()
@pytest.fixture()
def group(group_manager, group_member, group_member_and_contributor):
group = OSFGroupFactory(creator=group_manager)
group.make_member(group_member)
group.make_member(group_member_and_contributor)
return group
@pytest.fixture()
def project(group, admin_contributor, write_contributor, group_member_and_contributor):
project = ProjectFactory(
creator=admin_contributor
)
project.add_contributor(write_contributor, WRITE)
project.add_contributor(group_member_and_contributor, READ)
project.add_osf_group(group)
return project
@pytest.mark.django_db
class TestNodeContributorsAndGroupMembers:
def test_list_and_filter_contributors_and_group_members(
self, app, project, admin_contributor, write_contributor, group_manager,
group_member, group_member_and_contributor, non_contributor):
url = '/{}nodes/{}/contributors_and_group_members/'.format(API_BASE, project._id)
# unauthenticated
res = app.get(url, expect_errors=True)
assert res.status_code == 401
# noncontributor
res = app.get(url, auth=non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# write_contributor
res = app.get(url, auth=write_contributor.auth, expect_errors=True)
assert res.status_code == 200
# group_member
res = app.get(url, auth=group_member.auth, expect_errors=True)
assert res.status_code == 200
# assert all contributors and group members appear, no duplicates
res = app.get(url, auth=admin_contributor.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 5
expected = set([
admin_contributor._id,
write_contributor._id,
group_manager._id,
group_member._id,
group_member_and_contributor._id
])
actual = set([node['id'] for node in res.json['data']])
assert actual == expected
url = '/{}nodes/{}/contributors_and_group_members/?filter[given_name]={}'.format(API_BASE, project._id, group_manager.given_name)
res = app.get(url, auth=admin_contributor.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == group_manager._id
url = '/{}nodes/{}/contributors_and_group_members/?filter[given_name]=NOT_EVEN_A_NAME'.format(API_BASE, project._id)
res = app.get(url, auth=admin_contributor.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 0
|
b07302ce51f26925318defaf9fcae27934747875
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/experimental_extender.py
|
a9780696fd29ebfd014b582e38a7186d93c35b8f
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 412
|
py
|
experimental_extender.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.utils.graph import Node
from openvino.tools.mo.utils.ir_reader.extender import Extender
class ExperimentalDetectronROIFeatureExtractor_extender(Extender):
op = 'ExperimentalDetectronROIFeatureExtractor'
@staticmethod
def extend(op: Node):
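# attr_to_list normalizes the 'pyramid_scales' attribute read from the IR into a Python
# list (it may have been parsed as a single scalar); this is an assumption based on the
# helper's name, not verified against the Extender implementation.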
Extender.attr_to_list(op, 'pyramid_scales')
|