hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acdfce481cc4874d67d075a4ac61103881f62d13 | 3,675 | py | Python | plynx/db/node_cache.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | plynx/db/node_cache.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | plynx/db/node_cache.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | import hashlib
from builtins import str
from plynx.constants import Collections
from plynx.db.db_object import DBObject, DBObjectField
from plynx.db.output import Output
from plynx.utils.common import ObjectId
from plynx.utils.config import get_demo_config
demo_config = get_demo_config()
class NodeCache(DBObject):
    """Basic Node Cache with db interface.

    A cache entry records the outputs and logs of a completed Node run,
    keyed by a hash of the node's inputs, parameters and parent node, so
    that identical runs can reuse results instead of re-executing.
    """
    FIELDS = {
        '_id': DBObjectField(
            type=ObjectId,
            default=ObjectId,
            is_list=False,
        ),
        'key': DBObjectField(
            type=str,
            default='',
            is_list=False,
        ),
        'graph_id': DBObjectField(
            type=ObjectId,
            default=None,
            is_list=False,
        ),
        'node_id': DBObjectField(
            type=ObjectId,
            default=None,
            is_list=False,
        ),
        'outputs': DBObjectField(
            type=Output,
            default=list,
            is_list=True,
        ),
        'logs': DBObjectField(
            type=Output,
            default=list,
            is_list=True,
        ),
        # `protected` is used to prevent removing saved cache
        'protected': DBObjectField(
            type=bool,
            default=False,
            is_list=False,
        ),
        'removed': DBObjectField(
            type=bool,
            default=False,
            is_list=False,
        ),
    }
    DB_COLLECTION = Collections.NODE_CACHE
    # Parameters that do not affect a run's result, so they are excluded
    # from the cache key.
    IGNORED_PARAMETERS = {'cmd', '_timeout'}
    @staticmethod
    def instantiate(node, graph_id, user_id):
        """Instantiate a Node Cache from Node.

        Args:
            node (Node): Node object
            graph_id (ObjectId, str): Graph ID
            user_id (ObjectId, str): User ID

        Return:
            (NodeCache)
        """
        return NodeCache({
            'key': NodeCache.generate_key(node, user_id),
            'node_id': node._id,
            'graph_id': graph_id,
            'outputs': [output.to_dict() for output in node.outputs],
            'logs': [log.to_dict() for log in node.logs],
        })
    # TODO after Demo: remove user_id
    @staticmethod
    def generate_key(node, user_id):
        """Generate the cache key hash for a node.

        Inputs and parameters are sorted by name so equivalent nodes always
        hash to the same key, regardless of declaration order.

        Args:
            node (Node): Node object
            user_id (ObjectId, str): User ID

        Return:
            (str) Hex sha256 hash value
        """
        if not demo_config.enabled:
            user_id = ''    # TODO after demo
        sorted_inputs = sorted(node.inputs, key=lambda x: x.name)
        # `node_input` (not `input`) avoids shadowing the builtin.
        inputs_hash = ','.join(
            '{}:{}'.format(
                node_input.name,
                ','.join(sorted(value.resource_id for value in node_input.values))
            )
            for node_input in sorted_inputs
        )
        sorted_parameters = sorted(node.parameters, key=lambda x: x.name)
        parameters_hash = ','.join(
            '{}:{}'.format(parameter.name, parameter.value)
            for parameter in sorted_parameters
            if parameter.name not in NodeCache.IGNORED_PARAMETERS
        )
        return hashlib.sha256(
            '{};{};{};{}'.format(
                node.parent_node,
                inputs_hash,
                parameters_hash,
                str(user_id)).encode('utf-8')
        ).hexdigest()
    def __str__(self):
        return 'NodeCache(_id="{}")'.format(self._id)
    def __repr__(self):
        return 'NodeCache({})'.format(str(self.to_dict()))
| 27.631579 | 100 | 0.512925 |
acdfce54c7a59fd3ec832d340450a3792a418a3e | 1,407 | py | Python | kernel-modules/build/apply-blocklist.py | kudz00/collector | 73a5366e4d95bc33e85ed0d7578c74f0a2e8a0aa | [
"Apache-2.0"
] | 1 | 2022-03-31T15:25:16.000Z | 2022-03-31T15:25:16.000Z | kernel-modules/build/apply-blocklist.py | kudz00/collector | 73a5366e4d95bc33e85ed0d7578c74f0a2e8a0aa | [
"Apache-2.0"
] | 4 | 2022-03-31T16:16:00.000Z | 2022-03-31T23:24:33.000Z | kernel-modules/build/apply-blocklist.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import re
# Strips whitespaces and comments at the end of line, i.e., turns ' foo # bar' into ' foo' and
# ' # comment line only' into ''.
strip_comment_re = re.compile(r'\s*(?:#.*)?$')
space_re = re.compile(r'\s+')
def pattern_to_re(pat):
if not pat:
return ".*"
if pat[0] == '~':
return pat[1:]
parts = pat.split('*')
return '.*'.join(re.escape(part) for part in parts)
def open_input(filename):
    """Return a readable file object: stdin for '-', otherwise the named file."""
    return sys.stdin if filename == '-' else open(filename)
def main(blocklist_file, tasks_file='-'):
    """Print every task line not matched by the blocklist.

    Each blocklist line holds up to three whitespace-separated glob
    patterns (missing ones default to '*'); a task line is suppressed when
    all three patterns match its whitespace-separated fields.
    """
    def normalize(raw_line):
        # Drop trailing comments and surrounding whitespace.
        return strip_comment_re.sub('', raw_line).strip()

    entry_regexes = []
    with open(blocklist_file, 'r') as blocklist:
        for raw in blocklist:
            entry = normalize(raw)
            if not entry:
                continue
            # Pad to exactly three fields, then glob -> regex each field.
            fields = (space_re.split(entry) + ["*", "*"])[:3]
            entry_regexes.append(
                r'\s+'.join(pattern_to_re(field) for field in fields))
    # One anchored alternation covering every blocklist entry.
    combined_re = re.compile(
        r'^(?:%s)$' % r'|'.join(r'(?:%s)' % rx for rx in entry_regexes))
    with open_input(tasks_file) as tasks:
        for raw in tasks:
            task = normalize(raw)
            if task and not combined_re.match(task):
                print(task)
# CLI entry point: apply-blocklist.py BLOCKLIST_FILE [TASKS_FILE]
# (the tasks file defaults to stdin via '-').
if __name__ == '__main__':
    main(*sys.argv[1:])
| 27.057692 | 98 | 0.555082 |
acdfce6de74822c6b6bae3a138000f9297113b2f | 6,672 | py | Python | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/ModifyHostAvailabilityRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/ModifyHostAvailabilityRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/ModifyHostAvailabilityRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyHostAvailabilityRequest(RpcRequest):
    """RPC request for the CMS ``ModifyHostAvailability`` API (2019-01-01).

    Auto-generated accessor style: every query parameter gets a get_/set_
    pair that proxies ``self.get_query_params()`` / ``self.add_query_param()``.
    Keep edits mechanical and consistent with the generator's output.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Cms', '2019-01-01', 'ModifyHostAvailability','cms')
        self.set_method('POST')
    def get_TaskOptionHttpMethod(self):
        return self.get_query_params().get('TaskOption.HttpMethod')
    def set_TaskOptionHttpMethod(self,TaskOptionHttpMethod):
        self.add_query_param('TaskOption.HttpMethod',TaskOptionHttpMethod)
    def get_TaskOptionHttpHeader(self):
        return self.get_query_params().get('TaskOption.HttpHeader')
    def set_TaskOptionHttpHeader(self,TaskOptionHttpHeader):
        self.add_query_param('TaskOption.HttpHeader',TaskOptionHttpHeader)
    def get_AlertConfigEscalationLists(self):
        return self.get_query_params().get('AlertConfigEscalationList')
    def set_AlertConfigEscalationLists(self, AlertConfigEscalationLists):
        # Repeated structures are flattened into indexed query keys:
        # AlertConfigEscalationList.N.<Field>, N starting at 1.
        for depth1 in range(len(AlertConfigEscalationLists)):
            if AlertConfigEscalationLists[depth1].get('Times') is not None:
                self.add_query_param('AlertConfigEscalationList.' + str(depth1 + 1) + '.Times', AlertConfigEscalationLists[depth1].get('Times'))
            if AlertConfigEscalationLists[depth1].get('MetricName') is not None:
                self.add_query_param('AlertConfigEscalationList.' + str(depth1 + 1) + '.MetricName', AlertConfigEscalationLists[depth1].get('MetricName'))
            if AlertConfigEscalationLists[depth1].get('Value') is not None:
                self.add_query_param('AlertConfigEscalationList.' + str(depth1 + 1) + '.Value', AlertConfigEscalationLists[depth1].get('Value'))
            if AlertConfigEscalationLists[depth1].get('Operator') is not None:
                self.add_query_param('AlertConfigEscalationList.' + str(depth1 + 1) + '.Operator', AlertConfigEscalationLists[depth1].get('Operator'))
            if AlertConfigEscalationLists[depth1].get('Aggregate') is not None:
                self.add_query_param('AlertConfigEscalationList.' + str(depth1 + 1) + '.Aggregate', AlertConfigEscalationLists[depth1].get('Aggregate'))
    def get_TaskName(self):
        return self.get_query_params().get('TaskName')
    def set_TaskName(self,TaskName):
        self.add_query_param('TaskName',TaskName)
    def get_AlertConfigSilenceTime(self):
        return self.get_query_params().get('AlertConfig.SilenceTime')
    def set_AlertConfigSilenceTime(self,AlertConfigSilenceTime):
        self.add_query_param('AlertConfig.SilenceTime',AlertConfigSilenceTime)
    def get_TaskOptionHttpResponseCharset(self):
        return self.get_query_params().get('TaskOption.HttpResponseCharset')
    def set_TaskOptionHttpResponseCharset(self,TaskOptionHttpResponseCharset):
        self.add_query_param('TaskOption.HttpResponseCharset',TaskOptionHttpResponseCharset)
    def get_TaskOptionHttpNegative(self):
        return self.get_query_params().get('TaskOption.HttpNegative')
    def set_TaskOptionHttpNegative(self,TaskOptionHttpNegative):
        self.add_query_param('TaskOption.HttpNegative',TaskOptionHttpNegative)
    def get_TaskOptionInterval(self):
        return self.get_query_params().get('TaskOption.Interval')
    def set_TaskOptionInterval(self,TaskOptionInterval):
        self.add_query_param('TaskOption.Interval',TaskOptionInterval)
    def get_AlertConfigNotifyType(self):
        return self.get_query_params().get('AlertConfig.NotifyType')
    def set_AlertConfigNotifyType(self,AlertConfigNotifyType):
        self.add_query_param('AlertConfig.NotifyType',AlertConfigNotifyType)
    def get_TaskOptionTelnetOrPingHost(self):
        return self.get_query_params().get('TaskOption.TelnetOrPingHost')
    def set_TaskOptionTelnetOrPingHost(self,TaskOptionTelnetOrPingHost):
        self.add_query_param('TaskOption.TelnetOrPingHost',TaskOptionTelnetOrPingHost)
    def get_TaskOptionHttpResponseMatchContent(self):
        return self.get_query_params().get('TaskOption.HttpResponseMatchContent')
    def set_TaskOptionHttpResponseMatchContent(self,TaskOptionHttpResponseMatchContent):
        self.add_query_param('TaskOption.HttpResponseMatchContent',TaskOptionHttpResponseMatchContent)
    def get_Id(self):
        return self.get_query_params().get('Id')
    def set_Id(self,Id):
        self.add_query_param('Id',Id)
    def get_InstanceLists(self):
        return self.get_query_params().get('InstanceList')
    def set_InstanceLists(self, InstanceLists):
        # Plain repeated values are flattened to InstanceList.N (1-based).
        for depth1 in range(len(InstanceLists)):
            if InstanceLists[depth1] is not None:
                self.add_query_param('InstanceList.' + str(depth1 + 1) , InstanceLists[depth1])
    def get_GroupId(self):
        return self.get_query_params().get('GroupId')
    def set_GroupId(self,GroupId):
        self.add_query_param('GroupId',GroupId)
    def get_AlertConfigEndTime(self):
        return self.get_query_params().get('AlertConfig.EndTime')
    def set_AlertConfigEndTime(self,AlertConfigEndTime):
        self.add_query_param('AlertConfig.EndTime',AlertConfigEndTime)
    def get_TaskOptionHttpURI(self):
        return self.get_query_params().get('TaskOption.HttpURI')
    def set_TaskOptionHttpURI(self,TaskOptionHttpURI):
        self.add_query_param('TaskOption.HttpURI',TaskOptionHttpURI)
    def get_TaskScope(self):
        return self.get_query_params().get('TaskScope')
    def set_TaskScope(self,TaskScope):
        self.add_query_param('TaskScope',TaskScope)
    def get_TaskOptionHttpPostContent(self):
        return self.get_query_params().get('TaskOption.HttpPostContent')
    def set_TaskOptionHttpPostContent(self,TaskOptionHttpPostContent):
        self.add_query_param('TaskOption.HttpPostContent',TaskOptionHttpPostContent)
    def get_AlertConfigStartTime(self):
        return self.get_query_params().get('AlertConfig.StartTime')
    def set_AlertConfigStartTime(self,AlertConfigStartTime):
        self.add_query_param('AlertConfig.StartTime',AlertConfigStartTime)
    def get_AlertConfigWebHook(self):
        return self.get_query_params().get('AlertConfig.WebHook')
def set_AlertConfigWebHook(self,AlertConfigWebHook):
self.add_query_param('AlertConfig.WebHook',AlertConfigWebHook) | 42.227848 | 143 | 0.789418 |
acdfcee30d4e0bc77d78df3ffbee12e7f89f4738 | 930 | py | Python | advent2015/day02.py | Ginsusamurai/advent_2015 | 2ff9fd220f6cb76cd2be292df9c4347406e22252 | [
"MIT"
] | null | null | null | advent2015/day02.py | Ginsusamurai/advent_2015 | 2ff9fd220f6cb76cd2be292df9c4347406e22252 | [
"MIT"
] | null | null | null | advent2015/day02.py | Ginsusamurai/advent_2015 | 2ff9fd220f6cb76cd2be292df9c4347406e22252 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import csv
def returnNum(character):
    """Convert a single string token to its integer value."""
    return int(character)
if __name__ == "__main__":
    # Advent of Code 2015, day 2: wrapping paper (part 1) and ribbon (part 2).
    with open("inputs/day02input.csv", newline='') as f:
        packages = list(csv.reader(f))
    totalFeet = 0
    ribbon = 0
    for dimensions in packages:
        # Each row is a single "LxWxH" cell; sort so indices 0/1 are the
        # two shortest sides.
        lwh = sorted(int(side) for side in dimensions[0].split('x'))
        # Part 2: smallest face perimeter plus the bow (box volume).
        ribbon += 2 * lwh[0] + 2 * lwh[1] + lwh[0] * lwh[1] * lwh[2]
        # Doubled face areas (two of each face), smallest first.
        areas = sorted([2 * lwh[0] * lwh[1],
                        2 * lwh[1] * lwh[2],
                        2 * lwh[0] * lwh[2]])
        print(areas)
        # Part 1: surface area plus slack equal to the smallest face.
        totalFeet += sum(areas) + int(areas[0] / 2)
    print("The Elves need a total of {} feet of paper".format(totalFeet))
print("The Elves also need {} feet of ribbon".format(ribbon)) | 25.135135 | 79 | 0.537634 |
acdfcf3bf7236f12a48774945395a4272cb6ff30 | 14,231 | py | Python | Source/chrome/tools/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py | yury-s/v8-inspector | 0ab4779e0909d387f243f41ca2621237cdb0c7fe | [
"BSD-3-Clause"
] | 20 | 2015-08-26T06:46:00.000Z | 2019-02-27T09:05:58.000Z | Source/chrome/tools/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py | yury-s/v8-inspector | 0ab4779e0909d387f243f41ca2621237cdb0c7fe | [
"BSD-3-Clause"
] | null | null | null | Source/chrome/tools/telemetry/telemetry/web_perf/metrics/gpu_timeline_unittest.py | yury-s/v8-inspector | 0ab4779e0909d387f243f41ca2621237cdb0c7fe | [
"BSD-3-Clause"
] | 2 | 2015-08-26T05:49:35.000Z | 2020-02-03T20:22:43.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import async_slice as async_slice_module
from telemetry.timeline import model as model_module
from telemetry.timeline import slice as slice_module
from telemetry.unittest_util import test_page_test_results
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf import timeline_interaction_record as tir_module
# (category, name) pairs identifying the service- and device-side
# frame-end marker events used to split the timeline into frames.
SERVICE_FRAME_END_CATEGORY, SERVICE_FRAME_END_NAME = \
    gpu_timeline.SERVICE_FRAME_END_MARKER
DEVICE_FRAME_END_CATEGORY, DEVICE_FRAME_END_NAME = \
    gpu_timeline.DEVICE_FRAME_END_MARKER
# A single interaction record spanning the whole timeline, so every slice
# created by a test falls inside it.
INTERACTION_RECORDS = [tir_module.TimelineInteractionRecord("test-record",
                                                            0,
                                                            float('inf'))]
def _CreateGPUSlices(parent_thread, name, start_time, duration, offset=0):
  """Build a matching (service slice, device async slice) pair.

  Both slices share the toplevel GL category args; the device slice is
  shifted by `offset` to simulate GPU-side latency.
  """
  args = {'gl_category': gpu_timeline.TOPLEVEL_GL_CATEGORY}
  return (slice_module.Slice(parent_thread,
                             gpu_timeline.TOPLEVEL_SERVICE_CATEGORY,
                             name, start_time,
                             args=args,
                             duration=duration,
                             thread_duration=duration),
          async_slice_module.AsyncSlice(gpu_timeline.TOPLEVEL_DEVICE_CATEGORY,
                                        name, start_time + offset,
                                        args=args,
                                        duration=duration))
def _CreateFrameEndSlices(parent_thread, start_time, duration, offset=0):
  """Build the (service, device) frame-end marker slice pair.

  The returned slices carry the SERVICE/DEVICE frame-end marker names the
  metric uses as frame boundaries; `offset` shifts the device marker to
  simulate GPU-side latency.
  """
  args = {'gl_category': gpu_timeline.TOPLEVEL_GL_CATEGORY}
  return (slice_module.Slice(parent_thread,
                             SERVICE_FRAME_END_CATEGORY,
                             SERVICE_FRAME_END_NAME,
                             start_time,
                             args=args,
                             duration=duration,
                             thread_duration=duration),
          async_slice_module.AsyncSlice(DEVICE_FRAME_END_CATEGORY,
                                        DEVICE_FRAME_END_NAME,
                                        start_time + offset,
                                        args=args,
                                        duration=duration))
def _AddSliceToThread(parent_thread, slice_item):
  """Attach a slice to its thread via the API matching the slice type.

  Synchronous slices go through PushSlice; async slices go through
  AddAsyncSlice.

  Raises:
    TypeError: if slice_item is neither a Slice nor an AsyncSlice.
  """
  if isinstance(slice_item, slice_module.Slice):
    parent_thread.PushSlice(slice_item)
  elif isinstance(slice_item, async_slice_module.AsyncSlice):
    parent_thread.AddAsyncSlice(slice_item)
  else:
    # Raise instead of `assert False` so the check survives `python -O`.
    raise TypeError("Invalid Slice Item Type: %s" % type(slice_item))
class GPUTimelineTest(unittest.TestCase):
  """Unit tests for gpu_timeline.GPUTimelineMetric result reporting.

  Each test builds a synthetic timeline model, runs the metric over it and
  asserts the max/mean/stddev scalar values reported per timeline name.
  NOTE: `dict.iteritems()` / `.next()` below are Python 2 idioms, matching
  the rest of this codebase.
  """
  def GetResults(self, metric, model, renderer_thread, interaction_records):
    # Helper: run `metric` over `model` and collect its reported results.
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResults(model, renderer_thread, interaction_records, results)
    return results
  def testExpectedResults(self):
    """Test a simply trace will output all expected results."""
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
      _AddSliceToThread(test_thread, slice_item)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    # One 10ms slice -> max == mean == 10, stddev == 0.
    for name, src_type in (('swap', None), ('total', 'cpu'), ('total', 'gpu')):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'max'), 'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'mean'), 'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'stddev'), 'ms', 0)
    # Tracked GL context names were not traced, so they all report 0.
    for tracked_name in gpu_timeline.TRACKED_GL_CONTEXT_NAME.values():
      for source_type in ('cpu', 'gpu'):
        results.AssertHasPageSpecificScalarValue(
            gpu_timeline.TimelineName(tracked_name, source_type, 'max'),
            'ms', 0)
        results.AssertHasPageSpecificScalarValue(
            gpu_timeline.TimelineName(tracked_name, source_type, 'mean'),
            'ms', 0)
        results.AssertHasPageSpecificScalarValue(
            gpu_timeline.TimelineName(tracked_name, source_type, 'stddev'),
            'ms', 0)
  def testNoDeviceTraceResults(self):
    """Test expected results when missing device traces."""
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    # Only the service-side slice is added; the device slice is dropped.
    service_slice, _ = _CreateGPUSlices(test_thread, 'test_item', 100, 10)
    _AddSliceToThread(test_thread, service_slice)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    for name, source_type in (('swap', None), ('total', 'cpu')):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'max'), 'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'mean'), 'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'stddev'), 'ms', 0)
    # Without device traces, no 'gpu'-sourced values may be reported at all.
    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                      gpu_timeline.TimelineName('total', 'gpu', 'max'))
    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                      gpu_timeline.TimelineName('total', 'gpu', 'mean'))
    self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                      gpu_timeline.TimelineName('total', 'gpu', 'stddev'))
    for name in gpu_timeline.TRACKED_GL_CONTEXT_NAME.values():
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, 'cpu', 'max'), 'ms', 0)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, 'cpu', 'mean'), 'ms', 0)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, 'cpu', 'stddev'), 'ms', 0)
      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                        gpu_timeline.TimelineName(name, 'gpu', 'max'))
      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                        gpu_timeline.TimelineName(name, 'gpu', 'mean'))
      self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
                        gpu_timeline.TimelineName(name, 'gpu', 'stddev'))
  def testFrameSeparation(self):
    """Test frames are correctly calculated using the frame end marker."""
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    # First frame is 10 seconds.
    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
      _AddSliceToThread(test_thread, slice_item)
    # Mark frame end.
    for slice_item in _CreateFrameEndSlices(test_thread, 105, 5):
      _AddSliceToThread(test_thread, slice_item)
    # Second frame is 20 seconds.
    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 110, 20):
      _AddSliceToThread(test_thread, slice_item)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    # Two frames of 10 and 20 -> max 20, mean 15, stddev 5.
    for name, source_type in (('swap', None),
                              ('total', 'cpu'),
                              ('total', 'gpu')):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'max'), 'ms', 20)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'mean'), 'ms', 15)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, source_type, 'stddev'), 'ms', 5)
  def testFrameSeparationBeforeMarker(self):
    """Test frames are correctly calculated using the frame end marker."""
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    # Mark frame end.
    for slice_item in _CreateFrameEndSlices(test_thread, 105, 5):
      _AddSliceToThread(test_thread, slice_item)
    # First frame is 10 seconds.
    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 100, 10):
      _AddSliceToThread(test_thread, slice_item)
    # Second frame is 20 seconds.
    for slice_item in _CreateGPUSlices(test_thread, 'test_item', 110, 20):
      _AddSliceToThread(test_thread, slice_item)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    for name, src_type in (('swap', None), ('total', 'cpu'), ('total', 'gpu')):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'max'), 'ms', 20)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'mean'), 'ms', 15)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(name, src_type, 'stddev'), 'ms', 5)
  def testTrackedNameTraces(self):
    """Be sure tracked names are being recorded correctly."""
    self.assertGreater(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 0)
    marker, result = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems().next()
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    for slice_item in _CreateGPUSlices(test_thread, marker, 100, 10):
      _AddSliceToThread(test_thread, slice_item)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    for source_type in ('cpu', 'gpu'):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'max'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'mean'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'stddev'),
          'ms', 0)
  def testTrackedNameWithContextIDTraces(self):
    """Be sure tracked names with context IDs are recorded correctly."""
    self.assertGreater(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 0)
    marker, result = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems().next()
    # Context IDs are appended to the marker name; results should still be
    # attributed to the base tracked name.
    context_id = '-0x1234'
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    for slice_item in _CreateGPUSlices(test_thread, marker + context_id,
                                       100, 10):
      _AddSliceToThread(test_thread, slice_item)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    for source_type in ('cpu', 'gpu'):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'max'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'mean'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result, source_type, 'stddev'),
          'ms', 0)
  def testOutOfOrderDeviceTraces(self):
    """Out of order device traces are still matched up to correct services."""
    self.assertGreaterEqual(len(gpu_timeline.TRACKED_GL_CONTEXT_NAME), 2)
    # Pick two markers that map to distinct tracked result names.
    tracked_names_iter = gpu_timeline.TRACKED_GL_CONTEXT_NAME.iteritems()
    marker1_name, result1_name = tracked_names_iter.next()
    result2_name = result1_name
    while result2_name == result1_name:
      marker2_name, result2_name = tracked_names_iter.next()
    model = model_module.TimelineModel()
    test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    # marker1 lasts for 10 seconds.
    service_item1, device_item1 = _CreateGPUSlices(test_thread, marker1_name,
                                                   100, 10)
    # marker2 lasts for 20 seconds.
    service_item2, device_item2 = _CreateGPUSlices(test_thread, marker2_name,
                                                   200, 20)
    # Append out of order
    _AddSliceToThread(test_thread, service_item1)
    _AddSliceToThread(test_thread, service_item2)
    _AddSliceToThread(test_thread, device_item2)
    _AddSliceToThread(test_thread, device_item1)
    model.FinalizeImport()
    metric = gpu_timeline.GPUTimelineMetric()
    results = self.GetResults(metric, model=model, renderer_thread=test_thread,
                              interaction_records=INTERACTION_RECORDS)
    for source_type in ('cpu', 'gpu'):
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result1_name, source_type, 'max'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result1_name, source_type, 'mean'),
          'ms', 10)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result1_name, source_type, 'stddev'),
          'ms', 0)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result2_name, source_type, 'max'),
          'ms', 20)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result2_name, source_type, 'mean'),
          'ms', 20)
      results.AssertHasPageSpecificScalarValue(
          gpu_timeline.TimelineName(result2_name, source_type, 'stddev'),
          'ms', 0)
| 45.321656 | 79 | 0.675076 |
acdfcf7d534f7648a36a403429ec0b318ded2cfd | 509 | py | Python | script/version.py | RobbeSneyders/a2wsgi | 3c5d70ae29413fffae678623da7abc75b284f4d3 | [
"Apache-2.0"
] | 51 | 2020-05-22T00:53:41.000Z | 2022-03-30T11:41:04.000Z | script/version.py | RobbeSneyders/a2wsgi | 3c5d70ae29413fffae678623da7abc75b284f4d3 | [
"Apache-2.0"
] | 17 | 2020-07-13T05:21:54.000Z | 2022-03-24T16:17:36.000Z | script/version.py | RobbeSneyders/a2wsgi | 3c5d70ae29413fffae678623da7abc75b284f4d3 | [
"Apache-2.0"
] | 12 | 2020-10-22T09:09:10.000Z | 2022-03-18T11:39:36.000Z | import importlib
import os
import sys
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
    """
    Return the package version string.

    Prepends the repo root to ``sys.path`` so the local checkout of
    ``a2wsgi`` (not an installed copy) is imported, then reads its
    ``__version__`` attribute.
    """
    sys.path.insert(0, here)
    return importlib.import_module("a2wsgi").__version__
# Release helper: sync pyproject.toml with the package version, commit,
# tag and push so CI can publish the release.
os.chdir(here)
# Keep pyproject.toml's version in sync with a2wsgi.__version__.
os.system(f"poetry version {get_version()}")
os.system("git add a2wsgi/* pyproject.toml")
os.system(f'git commit -m "v{get_version()}"')
os.system("git push")
# Tag the release and push the tag.
os.system("git tag v{0}".format(get_version()))
os.system("git push --tags")
| 22.130435 | 66 | 0.689587 |
acdfcf7ea35d21c3ff204a7a9667d10b87958ba6 | 1,084 | py | Python | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/custom_parameter_group.py | fhoering/autorest | b36c77ebb6a5c92aca72eea0894a683506af5817 | [
"MIT"
] | null | null | null | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/custom_parameter_group.py | fhoering/autorest | b36c77ebb6a5c92aca72eea0894a683506af5817 | [
"MIT"
] | null | null | null | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/custom_parameter_group.py | fhoering/autorest | b36c77ebb6a5c92aca72eea0894a683506af5817 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CustomParameterGroup(Model):
    """Additional parameters for a set of operations, such as:
    Paging_getMultiplePagesFragmentWithGroupingNextLink,
    Paging_nextFragmentWithGrouping.

    :param api_version: Sets the api version to use.
    :type api_version: str
    :param tenant: Sets the tenant to use.
    :type tenant: str
    """

    # msrest validation map: both fields are mandatory.
    _validation = {
        'api_version': {'required': True},
        'tenant': {'required': True},
    }

    # NOTE(review): generated code — __init__ does not call
    # super().__init__(); confirm msrest.Model tolerates this before editing.
    def __init__(self, api_version, tenant):
        self.api_version = api_version
        self.tenant = tenant
| 31.882353 | 76 | 0.609779 |
acdfcf90fc829938d4c5d7834b4758337d0980e3 | 146 | py | Python | python/anyascii/_data/_2d7.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | python/anyascii/_data/_2d7.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | python/anyascii/_data/_2d7.py | casept/anyascii | d4f426b91751254b68eaa84c6cd23099edd668e6 | [
"ISC"
] | null | null | null | b=' Ling Khan Ai' | 146 | 146 | 0.075342 |
acdfcfa636cdf79af56d296fec43122ebae571b0 | 859 | py | Python | contrib/tx_fee.py | Groestlcoin/electrs | f057280ebcd819dada6eb8478d9b15ee048c7b3c | [
"MIT"
] | null | null | null | contrib/tx_fee.py | Groestlcoin/electrs | f057280ebcd819dada6eb8478d9b15ee048c7b3c | [
"MIT"
] | null | null | null | contrib/tx_fee.py | Groestlcoin/electrs | f057280ebcd819dada6eb8478d9b15ee048c7b3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import client
def main():
    """Fetch a transaction over Electrum-GRS RPC and report its fee and rate."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='localhost')
    parser.add_argument("txid")
    args = parser.parse_args()
    conn = client.Client((args.host, 50001))
    # Verbose gettransaction for the transaction under inspection.
    tx, = conn.call([client.request("blockchain.transaction.get", args.txid, True)])
    # Fetch every previous transaction referenced by the inputs in one batch.
    requests = [
        client.request("blockchain.transaction.get", vin["txid"], True)
        for vin in tx["vin"]
    ]
    # Fee = sum of spent input values minus sum of output values.
    fee = sum(
        prev_tx["vout"][vin["vout"]]["value"]
        for vin, prev_tx in zip(tx["vin"], conn.call(requests))
    )
    fee -= sum(vout["value"] for vout in tx["vout"])
    print(f'vSize = {tx["vsize"]}, Fee = {1e3 * fee:.2f} mGRS = {1e8 * fee / tx["vsize"]:.2f} gro/vB')
# CLI entry point: tx_fee.py [--host HOST] TXID
if __name__ == "__main__":
    main()
| 29.62069 | 102 | 0.61234 |
acdfd008ffad5818e67bdb59faba373b4438add5 | 1,760 | py | Python | LLC_Membranes/llclib/fast_rotate.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-06-18T15:26:49.000Z | 2021-08-11T18:57:39.000Z | LLC_Membranes/llclib/fast_rotate.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 2 | 2019-08-22T20:11:46.000Z | 2019-08-22T22:35:17.000Z | LLC_Membranes/llclib/fast_rotate.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-07-06T15:41:53.000Z | 2021-01-27T17:59:13.000Z | #!/usr/bin/env python
import numpy as np
import math
def quadrant(pt, origin=[0, 0]):
""" Find out which quadrant of the xy plane a point is sitting in
II | I
|
-------------
|
III | IV
:param: pt: point to be tested
:param: origin: the location of the origin. Default is [0, 0] but can be set arbitrarily (such as a pore center)
"""
if pt[0] > origin[0] and pt[1] > origin[1]:
return 1
elif pt[0] < origin[0] and pt[1] < origin[1]:
return 3
elif pt[0] > origin[0] and pt[1] < origin[1]:
return 4
elif pt[0] < origin[0] and pt[1] > origin[1]:
return 2
else:
return 0 # the case where the point lies on the x or y axis
def rotate_z(theta):
"""
formerly 'rotate'
:param: angle by which to rotate the monomer
:return: Rotation matrix to rotate input vector about z-axis
"""
Rx = np.zeros([3, 3]) # makes a 3 x 3 zero matrix
Rx[0, 0] = math.cos(theta)
Rx[1, 0] = math.sin(theta)
Rx[0, 1] = -math.sin(theta)
Rx[1, 1] = math.cos(theta)
Rx[2, 2] = 1
return Rx
def rotate_vector(xyz, v1, v2):
"""
:param xyz: xyz coordinates of object to be rotated
:param v1: original vector
:param v2: direction you want v1 to be pointing in
:return: rotated coordinates
"""
quad = quadrant(v1)
# first find the angle between v1 and v2
num = np.dot(v1, v2)
denom = np.linalg.norm(v1) * np.linalg.norm(v2)
theta = np.arccos(num / denom)
if quad == 1 or quad == 2:
Rz = rotate_z(-theta)
else:
Rz = rotate_z(theta)
pos = np.zeros_like(xyz)
for i in range(np.shape(xyz)[0]):
pos[i, :] = np.dot(Rz, xyz[i, :])
return pos | 26.268657 | 116 | 0.566477 |
acdfd05dd30fd7dbfe9f2533779d9937b8ce57ea | 142,802 | py | Python | dask/array/tests/test_array_core.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2021-11-02T18:53:23.000Z | 2021-11-02T18:53:23.000Z | dask/array/tests/test_array_core.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2021-10-07T09:57:58.000Z | 2021-10-07T09:57:58.000Z | dask/array/tests/test_array_core.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2022-02-26T15:15:40.000Z | 2022-02-26T15:15:40.000Z | import contextlib
import copy
import xml.etree.ElementTree
from unittest import mock
import pytest
np = pytest.importorskip("numpy")
import operator
import os
import time
import warnings
from io import StringIO
from operator import add, sub
from threading import Lock
from numpy import nancumprod, nancumsum
from tlz import concat, countby, merge
from tlz.curried import identity
import dask
import dask.array as da
from dask.array.core import (
Array,
blockdims_from_blockshape,
broadcast_chunks,
broadcast_shapes,
broadcast_to,
common_blockdim,
concatenate,
concatenate3,
concatenate_axes,
dotmany,
from_array,
from_delayed,
from_func,
getem,
getter,
normalize_chunks,
optimize,
stack,
store,
)
from dask.array.utils import assert_eq, same_keys
from dask.base import compute_as_if_collection, tokenize
from dask.blockwise import broadcast_dimensions
from dask.blockwise import make_blockwise_graph as top
from dask.blockwise import optimize_blockwise
from dask.delayed import Delayed, delayed
from dask.utils import apply, key_split, tmpdir, tmpfile
from dask.utils_test import dec, inc
from ..chunk import getitem
from .test_dispatch import EncapsulateNDArray
def test_getem():
sol = {
("X", 0, 0): (getter, "X", (slice(0, 2), slice(0, 3))),
("X", 1, 0): (getter, "X", (slice(2, 4), slice(0, 3))),
("X", 1, 1): (getter, "X", (slice(2, 4), slice(3, 6))),
("X", 0, 1): (getter, "X", (slice(0, 2), slice(3, 6))),
}
assert getem("X", (2, 3), shape=(4, 6)) == sol
def test_top():
assert top(inc, "z", "ij", "x", "ij", numblocks={"x": (2, 2)}) == {
("z", 0, 0): (inc, ("x", 0, 0)),
("z", 0, 1): (inc, ("x", 0, 1)),
("z", 1, 0): (inc, ("x", 1, 0)),
("z", 1, 1): (inc, ("x", 1, 1)),
}
assert top(
add, "z", "ij", "x", "ij", "y", "ij", numblocks={"x": (2, 2), "y": (2, 2)}
) == {
("z", 0, 0): (add, ("x", 0, 0), ("y", 0, 0)),
("z", 0, 1): (add, ("x", 0, 1), ("y", 0, 1)),
("z", 1, 0): (add, ("x", 1, 0), ("y", 1, 0)),
("z", 1, 1): (add, ("x", 1, 1), ("y", 1, 1)),
}
assert top(
dotmany, "z", "ik", "x", "ij", "y", "jk", numblocks={"x": (2, 2), "y": (2, 2)}
) == {
("z", 0, 0): (dotmany, [("x", 0, 0), ("x", 0, 1)], [("y", 0, 0), ("y", 1, 0)]),
("z", 0, 1): (dotmany, [("x", 0, 0), ("x", 0, 1)], [("y", 0, 1), ("y", 1, 1)]),
("z", 1, 0): (dotmany, [("x", 1, 0), ("x", 1, 1)], [("y", 0, 0), ("y", 1, 0)]),
("z", 1, 1): (dotmany, [("x", 1, 0), ("x", 1, 1)], [("y", 0, 1), ("y", 1, 1)]),
}
assert top(identity, "z", "", "x", "ij", numblocks={"x": (2, 2)}) == {
("z",): (identity, [[("x", 0, 0), ("x", 0, 1)], [("x", 1, 0), ("x", 1, 1)]])
}
def test_top_with_kwargs():
assert top(add, "z", "i", "x", "i", numblocks={"x": (2, 0)}, b=100) == {
("z", 0): (apply, add, [("x", 0)], {"b": 100}),
("z", 1): (apply, add, [("x", 1)], {"b": 100}),
}
def test_top_supports_broadcasting_rules():
assert top(
add, "z", "ij", "x", "ij", "y", "ij", numblocks={"x": (1, 2), "y": (2, 1)}
) == {
("z", 0, 0): (add, ("x", 0, 0), ("y", 0, 0)),
("z", 0, 1): (add, ("x", 0, 1), ("y", 0, 0)),
("z", 1, 0): (add, ("x", 0, 0), ("y", 1, 0)),
("z", 1, 1): (add, ("x", 0, 1), ("y", 1, 0)),
}
def test_top_literals():
assert top(add, "z", "ij", "x", "ij", 123, None, numblocks={"x": (2, 2)}) == {
("z", 0, 0): (add, ("x", 0, 0), 123),
("z", 0, 1): (add, ("x", 0, 1), 123),
("z", 1, 0): (add, ("x", 1, 0), 123),
("z", 1, 1): (add, ("x", 1, 1), 123),
}
def test_blockwise_literals():
x = da.ones((10, 10), chunks=(5, 5))
z = da.blockwise(add, "ij", x, "ij", 100, None, dtype=x.dtype)
assert_eq(z, x + 100)
z = da.blockwise(
lambda x, y, z: x * y + z, "ij", 2, None, x, "ij", 100, None, dtype=x.dtype
)
assert_eq(z, 2 * x + 100)
z = da.blockwise(getitem, "ij", x, "ij", slice(None), None, dtype=x.dtype)
assert_eq(z, x)
def test_blockwise_1_in_shape_I():
def test_f(a, b):
assert 1 in b.shape
p, k, N = 7, 2, 5
da.blockwise(
test_f,
"x",
da.zeros((2 * p, 9, k * N), chunks=(p, 3, k)),
"xzt",
da.zeros((2 * p, 9, 1), chunks=(p, 3, -1)),
"xzt",
concatenate=True,
dtype=float,
).compute()
def test_blockwise_1_in_shape_II():
def test_f(a, b):
assert 1 in b.shape
p, k, N = 7, 2, 5
da.blockwise(
test_f,
"x",
da.zeros((2 * p, 9, k * N, 8), chunks=(p, 9, k, 4)),
"xztu",
da.zeros((2 * p, 9, 1, 8), chunks=(p, 9, -1, 4)),
"xztu",
concatenate=True,
dtype=float,
).compute()
def test_blockwise_1_in_shape_III():
def test_f(a, b):
assert 1 in b.shape
k, N = 2, 5
da.blockwise(
test_f,
"x",
da.zeros((k * N, 9, 8), chunks=(k, 3, 4)),
"xtu",
da.zeros((1, 9, 8), chunks=(-1, 3, 4)),
"xtu",
concatenate=True,
dtype=float,
).compute()
def test_concatenate3_on_scalars():
assert_eq(concatenate3([1, 2]), np.array([1, 2]))
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {"x": x, "o": o}
getx = getem("x", (5, 5), shape=(20, 20))
geto = getem("o", (5, 5), shape=(20, 20))
result = top(
dotmany, "out", "ik", "x", "ij", "o", "jk", numblocks={"x": (4, 4), "o": (4, 4)}
)
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[("out", i, j) for j in range(4)] for i in range(4)])
assert_eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {"x": x}
getx = getem("x", (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, "out", "ij", "x", "ji", numblocks={"x": (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[("out", i, j) for j in range(4)] for i in range(4)])
assert_eq(concatenate3(out), x.T + 1)
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [("x", "i")]
numblocks = {"x": ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {"i": (1,)}
def test_broadcast_dimensions():
argpairs = [("x", "ij"), ("y", "ij")]
d = {"x": ("Hello", 1), "y": (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {"i": "Hello", "j": (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = "x"
dsk = merge({name: "some-array"}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape, dtype="f8")
assert a.numblocks == (10, 10)
assert a.__dask_keys__() == [[("x", i, j) for j in range(10)] for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
with pytest.raises(ValueError):
Array(dsk, name, chunks, shape=shape)
with pytest.raises(TypeError):
Array(dsk, name, chunks, shape=shape, dtype="f8", meta=np.empty(0, 0))
def test_uneven_chunks():
a = Array({}, "x", chunks=(3, 3), shape=(10, 10), dtype="f8")
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_suppoorts_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = "x"
dsk = merge({name: "some-array"}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape, dtype="f8")
assert set(concat(a.__dask_keys__())) == {("x", i, 0) for i in range(10)}
def test_keys():
dsk = dict((("x", i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, "x", chunks=(10, 10), shape=(50, 60), dtype="f8")
assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]
# Cache works
assert dx.__dask_keys__() is dx.__dask_keys__()
# Test mutating names clears key cache
dx.dask = {("y", i, j): () for i in range(5) for j in range(6)}
dx._name = "y"
assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]
d = Array({}, "x", (), shape=(), dtype="f8")
assert d.__dask_keys__() == [("x",)]
def test_Array_computation():
a = Array({("x", 0, 0): np.eye(3)}, "x", shape=(3, 3), chunks=(3, 3), dtype="f8")
assert_eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
def test_Array_numpy_gufunc_call__array_ufunc__01():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
ny = np.linalg._umath_linalg.inv(nx)
y = np.linalg._umath_linalg.inv(x)
assert_eq(ny, y)
def test_Array_numpy_gufunc_call__array_ufunc__02():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
nw, nv = np.linalg._umath_linalg.eig(nx)
w, v = np.linalg._umath_linalg.eig(x)
assert_eq(nw, w)
assert_eq(nv, v)
def test_stack():
a, b, c = [
Array(
getem(name, chunks=(2, 3), shape=(4, 6)),
name,
chunks=(2, 3),
dtype="f8",
shape=(4, 6),
)
for name in "ABC"
]
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.chunksize == (1, 2, 3)
assert s.dask[(s.name, 0, 1, 0)] == (getitem, ("A", 1, 0), (None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getitem, ("C", 1, 0), (None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.chunksize == (2, 1, 3)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ("B", 0, 0), (colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, ("B", 1, 0), (colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.chunksize == (2, 3, 1)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ("A", 0, 1), (colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, ("C", 1, 1), (colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
pytest.raises(ValueError, lambda: stack([]))
pytest.raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == stack([a, b, c], axis=2).chunks
def test_stack_zero_size():
x = np.empty((2, 0, 3))
y = da.from_array(x, chunks=1)
result_np = np.concatenate([x, x])
result_da = da.concatenate([y, y])
assert_eq(result_np, result_da)
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
chunks = compute_as_if_collection(Array, s.dask, s.__dask_keys__())
assert chunks[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_stack_promote_type():
i = np.arange(10, dtype="i4")
f = np.arange(10, dtype="f4")
di = da.from_array(i, chunks=5)
df = da.from_array(f, chunks=5)
res = da.stack([di, df])
assert_eq(res, np.stack([i, f]))
def test_stack_rechunk():
x = da.random.random(10, chunks=5)
y = da.random.random(10, chunks=4)
z = da.stack([x, y], axis=0)
assert z.shape == (2, 10)
assert z.chunks == ((1, 1), (4, 1, 3, 2))
assert_eq(z, np.stack([x.compute(), y.compute()], axis=0))
def test_stack_unknown_chunksizes():
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
a_df = pd.DataFrame({"x": np.arange(12)})
b_df = pd.DataFrame({"y": np.arange(12) * 10})
a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)
b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)
a_x = a_ddf.values
b_x = b_ddf.values
assert np.isnan(a_x.shape[0])
assert np.isnan(b_x.shape[0])
with pytest.raises(ValueError) as exc_info:
da.stack([a_x, b_x], axis=0)
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([a_x, b_x], axis=0, allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=0))
with pytest.raises(ValueError) as exc_info:
da.stack([a_x, b_x], axis=1)
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([a_x, b_x], axis=1, allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=1))
m_df = pd.DataFrame({"m": np.arange(12) * 100})
n_df = pd.DataFrame({"n": np.arange(12) * 1000})
m_ddf = dd.from_pandas(m_df, sort=False, npartitions=3)
n_ddf = dd.from_pandas(n_df, sort=False, npartitions=3)
m_x = m_ddf.values
n_x = n_ddf.values
assert np.isnan(m_x.shape[0])
assert np.isnan(n_x.shape[0])
with pytest.raises(ValueError) as exc_info:
da.stack([[a_x, b_x], [m_x, n_x]])
assert "shape" in str(exc_info.value)
assert "nan" in str(exc_info.value)
c_x = da.stack([[a_x, b_x], [m_x, n_x]], allow_unknown_chunksizes=True)
assert_eq(c_x, np.stack([[a_df.values, b_df.values], [m_df.values, n_df.values]]))
def test_concatenate():
a, b, c = [
Array(
getem(name, chunks=(2, 3), shape=(4, 6)),
name,
chunks=(2, 3),
dtype="f8",
shape=(4, 6),
)
for name in "ABC"
]
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ("A", 0, 1)
assert x.dask[(x.name, 5, 0)] == ("C", 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ("A", 1, 0)
assert y.dask[(y.name, 1, 5)] == ("C", 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
z = concatenate([a], axis=0)
assert z.shape == a.shape
assert z.chunks == a.chunks
assert z.dask == a.dask
assert z is a
assert (
concatenate([a, b, c], axis=-1).chunks == concatenate([a, b, c], axis=1).chunks
)
pytest.raises(ValueError, lambda: concatenate([]))
pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))
@pytest.mark.parametrize(
"dtypes", [((">f8", ">f8"), "float64"), (("<f4", "<f8"), "float64")]
)
def test_concatenate_types(dtypes):
dts_in, dt_out = dtypes
arrs = [np.zeros(4, dtype=dt) for dt in dts_in]
darrs = [from_array(arr, chunks=(2,)) for arr in arrs]
x = concatenate(darrs, axis=0)
assert x.dtype == dt_out
def test_concatenate_unknown_axes():
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
a_df = pd.DataFrame({"x": np.arange(12)})
b_df = pd.DataFrame({"y": np.arange(12) * 10})
a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)
b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)
a_x = a_ddf.values
b_x = b_ddf.values
assert np.isnan(a_x.shape[0])
assert np.isnan(b_x.shape[0])
da.concatenate([a_x, b_x], axis=0) # works fine
with pytest.raises(ValueError) as exc_info:
da.concatenate([a_x, b_x], axis=1) # unknown chunks
assert "nan" in str(exc_info.value)
assert "allow_unknown_chunksize" in str(exc_info.value)
c_x = da.concatenate(
[a_x, b_x], axis=1, allow_unknown_chunksizes=True
) # unknown chunks
assert_eq(c_x, np.concatenate([a_df.values, b_df.values], axis=1))
def test_concatenate_rechunk():
x = da.random.random((6, 6), chunks=(3, 3))
y = da.random.random((6, 6), chunks=(2, 2))
z = da.concatenate([x, y], axis=0)
assert z.shape == (12, 6)
assert z.chunks == ((3, 3, 2, 2, 2), (2, 1, 1, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=0))
z = da.concatenate([x, y], axis=1)
assert z.shape == (6, 12)
assert z.chunks == ((2, 1, 1, 2), (3, 3, 2, 2, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=1))
def test_concatenate_fixlen_strings():
x = np.array(["a", "b", "c"])
y = np.array(["aa", "bb", "cc"])
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
assert_eq(np.concatenate([x, y]), da.concatenate([a, b]))
def test_concatenate_zero_size():
x = np.random.random(10)
y = da.from_array(x, chunks=3)
result_np = np.concatenate([x, x[:0]])
result_da = da.concatenate([y, y[:0]])
assert_eq(result_np, result_da)
assert result_da is y
# dtype of a size 0 arrays can affect the output dtype
result_np = np.concatenate([np.zeros(0, dtype=float), np.zeros(1, dtype=int)])
result_da = da.concatenate([da.zeros(0, dtype=float), da.zeros(1, dtype=int)])
assert_eq(result_np, result_da)
# All empty arrays case
result_np = np.concatenate([np.zeros(0), np.zeros(0)])
result_da = da.concatenate([da.zeros(0), da.zeros(0)])
assert_eq(result_np, result_da)
def test_block_simple_row_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
expected = np.block([a1, a2[:, :0]])
result = da.block([d1, d2[:, :0]])
assert result is d1
assert_eq(expected, result)
def test_block_simple_column_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
def test_block_with_1d_arrays_row_wise():
# # # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
expected = np.block([a1, a2[:0]])
result = da.block([d1, d2[:0]])
assert result is d1
assert_eq(expected, result)
def test_block_with_1d_arrays_multiple_rows():
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1, a2], [a1, a2]])
result = da.block([[d1, d2], [d1, d2]])
assert_eq(expected, result)
def test_block_with_1d_arrays_column_wise():
# # # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
def test_block_mixed_1d_and_2d():
a1 = np.ones((2, 2))
a2 = np.array([2, 2])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[d1], [d2]])
result = da.block([[a1], [a2]])
assert_eq(expected, result)
def test_block_complicated():
# a bit more complicated
a1 = np.array([[1, 1, 1]])
a2 = np.array([[2, 2, 2]])
a3 = np.array([[3, 3, 3, 3, 3, 3]])
a4 = np.array([4, 4, 4, 4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([[a1, a2], [a3], [a4], [a5, a6], [a7]])
result = da.block([[d1, d2], [d3], [d4], [d5, d6], [d7]])
assert_eq(expected, result)
def test_block_nested():
a1 = np.array([1, 1, 1])
a2 = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
a3 = np.array([3, 3, 3])
a4 = np.array([4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([[np.block([[a1], [a3], [a4]]), a2], [a5, a6], [a7]])
result = da.block([[da.block([[d1], [d3], [d4]]), d2], [d5, d6], [d7]])
assert_eq(expected, result)
def test_block_3d():
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
d000 = da.asarray(a000)
d100 = da.asarray(a100)
d010 = da.asarray(a010)
d001 = da.asarray(a001)
d011 = da.asarray(a011)
d101 = da.asarray(a101)
d110 = da.asarray(a110)
d111 = da.asarray(a111)
expected = np.block([[[a000, a001], [a010, a011]], [[a100, a101], [a110, a111]]])
result = da.block([[[d000, d001], [d010, d011]], [[d100, d101], [d110, d111]]])
assert_eq(expected, result)
expected = np.block(
[
[[a000, a001[:, :, :0]], [a010[:, :0, :], a011[:, :0, :0]]],
[[a100[:0, :, :], a101[:0, :, :0]], [a110[:0, :0, :], a111[:0, :0, :0]]],
]
)
result = da.block(
[
[[d000, d001[:, :, :0]], [d010[:, :0, :], d011[:, :0, :0]]],
[[d100[:0, :, :], d101[:0, :, :0]], [d110[:0, :0, :], d111[:0, :0, :0]]],
]
)
assert result is d000
assert_eq(expected, result)
def test_block_with_mismatched_shape():
a = np.array([0, 0])
b = np.eye(2)
for arrays in [[a, b], [b, a]]:
with pytest.raises(ValueError):
da.block(arrays)
def test_block_no_lists():
assert_eq(da.block(1), np.block(1))
assert_eq(da.block(np.eye(3)), np.block(np.eye(3)))
def test_block_invalid_nesting():
for arrays in [
[1, [2]],
[1, []],
[[1], 2],
[[], 2],
[[[1], [2]], [[3, 4]], [5]], # missing brackets
]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r"depths are mismatched")
def test_block_empty_lists():
for arrays in [[], [[]], [[1], []]]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r"empty")
def test_block_tuple():
for arrays in [([1, 2], [3, 4]), [(1, 2), (3, 4)]]:
with pytest.raises(TypeError) as e:
da.block(arrays)
e.match(r"tuple")
def test_broadcast_shapes():
with warnings.catch_warnings(record=True) as record:
assert () == broadcast_shapes()
assert (2, 5) == broadcast_shapes((2, 5))
assert (0, 5) == broadcast_shapes((0, 1), (1, 5))
assert np.allclose(
(2, np.nan), broadcast_shapes((1, np.nan), (2, 1)), equal_nan=True
)
assert np.allclose(
(2, np.nan), broadcast_shapes((2, 1), (1, np.nan)), equal_nan=True
)
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
assert not record
pytest.raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (1, np.nan)))
def test_elemwise_on_scalars():
x = np.arange(10, dtype=np.int64)
a = from_array(x, chunks=(5,))
assert len(a.__dask_keys__()) == 2
assert_eq(a.sum() ** 2, x.sum() ** 2)
y = np.arange(10, dtype=np.int32)
b = from_array(y, chunks=(5,))
result = a.sum() * b
# Dask 0-d arrays do not behave like numpy scalars for type promotion
assert result.dtype == np.int64
assert result.compute().dtype == np.int64
assert (x.sum() * y).dtype == np.int32
assert_eq((x.sum() * y).astype(np.int64), result)
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert_eq(x + a, 2 * x)
assert_eq(a + x, 2 * x)
assert_eq(x + b, x + y)
assert_eq(b + x, x + y)
assert_eq(a + y, x + y)
assert_eq(y + a, x + y)
# Error on shape mismatch
pytest.raises(ValueError, lambda: a + y.T)
pytest.raises(ValueError, lambda: a + np.arange(2))
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert_eq(a + b, x + y)
assert_eq(b + a, x + y)
def test_elemwise_dtype():
values = [
da.from_array(np.ones(5, np.float32), chunks=3),
da.from_array(np.ones(5, np.int16), chunks=3),
da.from_array(np.ones(5, np.int64), chunks=3),
da.from_array(np.ones((), np.float64), chunks=()) * 1e200,
np.ones(5, np.float32),
1,
1.0,
1e200,
np.int64(1),
np.ones((), np.int64),
]
for x in values:
for y in values:
assert da.maximum(x, y).dtype == da.result_type(x, y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert_eq(c, x + 1)
c = a + b
assert_eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b) ** 2 > 5
with pytest.warns(None): # ZeroDivisionWarning
assert_eq(expr, (3 / x * y) ** 2 > 5)
with pytest.warns(None): # OverflowWarning
c = da.exp(a)
assert_eq(c, np.exp(x))
assert_eq(abs(-a), a)
assert_eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert_eq(x + 1, a + 1) # still float32
assert_eq(x + 1e50, a + 1e50) # now float64
assert_eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[("a", "i4"), ("b", "f4")])
y = from_array(x, chunks=(1,))
assert_eq(y["a"], x["a"])
assert_eq(y[["b", "a"]], x[["b", "a"]])
assert same_keys(y[["b", "a"]], y[["b", "a"]])
def test_field_access_with_shape():
dtype = [("col1", ("f4", (3, 2))), ("col2", ("f4", 3))]
data = np.ones((100, 50), dtype=dtype)
x = da.from_array(data, 10)
assert_eq(x["col1"], data["col1"])
assert_eq(x[["col1"]], data[["col1"]])
assert_eq(x["col2"], data["col2"])
assert_eq(x[["col1", "col2"]], data[["col1", "col2"]])
def test_matmul():
x = np.random.random((5, 5))
y = np.random.random((5, 2))
a = from_array(x, chunks=(1, 5))
b = from_array(y, chunks=(5, 1))
assert_eq(operator.matmul(a, b), a.dot(b))
assert_eq(operator.matmul(a, b), operator.matmul(x, y))
assert_eq(operator.matmul(a, y), operator.matmul(x, b))
list_vec = list(range(1, 6))
assert_eq(operator.matmul(list_vec, b), operator.matmul(list_vec, y))
assert_eq(operator.matmul(x, list_vec), operator.matmul(a, list_vec))
z = np.random.random((5, 5, 5))
c = from_array(z, chunks=(1, 5, 1))
assert_eq(operator.matmul(a, z), operator.matmul(x, c))
assert_eq(operator.matmul(z, a), operator.matmul(c, x))
def test_matmul_array_ufunc():
# regression test for https://github.com/dask/dask/issues/4353
x = np.random.random((5, 5))
y = np.random.random((5, 2))
a = from_array(x, chunks=(1, 5))
b = from_array(y, chunks=(5, 1))
result = b.__array_ufunc__(np.matmul, "__call__", a, b)
assert_eq(result, x.dot(y))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert_eq(x.T, a.T)
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
xb = np.broadcast_to(x, shape)
ab = broadcast_to(a, shape)
assert_eq(xb, ab)
if a.shape == ab.shape:
assert a is ab
pytest.raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
pytest.raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_broadcast_to_array():
x = np.random.randint(10, size=(5, 1, 6))
for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_scalar():
x = 5
for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_chunks():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape, chunks, expected_chunks in [
((5, 3, 6), (3, -1, 3), ((3, 2), (3,), (3, 3))),
((5, 3, 6), (3, 1, 3), ((3, 2), (1, 1, 1), (3, 3))),
((2, 5, 3, 6), (1, 3, 1, 3), ((1, 1), (3, 2), (1, 1, 1), (3, 3))),
]:
xb = np.broadcast_to(x, shape)
ab = broadcast_to(a, shape, chunks=chunks)
assert_eq(xb, ab)
assert ab.chunks == expected_chunks
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((2, 3), (1,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((3, 2), (3,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, (5, 2, 6), chunks=((3, 2), (3,), (3, 3)))
def test_broadcast_arrays():
assert np.broadcast_arrays() == da.broadcast_arrays()
a = np.arange(4)
d_a = da.from_array(a, chunks=tuple(s // 2 for s in a.shape))
a_0 = np.arange(4)[None, :]
a_1 = np.arange(4)[:, None]
d_a_0 = d_a[None, :]
d_a_1 = d_a[:, None]
a_r = np.broadcast_arrays(a_0, a_1)
d_r = da.broadcast_arrays(d_a_0, d_a_1)
assert isinstance(d_r, list)
assert len(a_r) == len(d_r)
for e_a_r, e_d_r in zip(a_r, d_r):
assert_eq(e_a_r, e_d_r)
def test_broadcast_arrays_uneven_chunks():
x = da.ones(30, chunks=(3,))
y = da.ones(30, chunks=(5,))
z = np.broadcast_arrays(x, y)
assert_eq(z, z)
x = da.ones((1, 30), chunks=(1, 3))
y = da.ones(30, chunks=(5,))
z = np.broadcast_arrays(x, y)
assert_eq(z, z)
@pytest.mark.parametrize(
"u_shape, v_shape",
[
[tuple(), (2, 3)],
[(1,), (2, 3)],
[(1, 1), (2, 3)],
[(0, 3), (1, 3)],
[(2, 0), (2, 1)],
[(1, 0), (2, 1)],
[(0, 1), (1, 3)],
],
)
def test_broadcast_operator(u_shape, v_shape):
u = np.random.random(u_shape)
v = np.random.random(v_shape)
d_u = from_array(u, chunks=1)
d_v = from_array(v, chunks=1)
w = u * v
d_w = d_u * d_v
assert_eq(w, d_w)
@pytest.mark.parametrize(
"original_shape,new_shape,chunks",
[
((10,), (10,), (3, 3, 4)),
((10,), (10, 1, 1), 5),
((10,), (1, 10), 5),
((24,), (2, 3, 4), 12),
((1, 24), (2, 3, 4), 12),
((2, 3, 4), (24,), (1, 3, 4)),
((2, 3, 4), (24,), 4),
((2, 3, 4), (24, 1), 4),
((2, 3, 4), (1, 24), 4),
((4, 4, 1), (4, 4), 2),
((4, 4), (4, 4, 1), 2),
((1, 4, 4), (4, 4), 2),
((1, 4, 4), (4, 4, 1), 2),
((1, 4, 4), (1, 1, 4, 4), 2),
((4, 4), (1, 4, 4, 1), 2),
((4, 4), (1, 4, 4), 2),
((2, 3), (2, 3), (1, 2)),
((2, 3), (3, 2), 3),
((4, 2, 3), (4, 6), 4),
((3, 4, 5, 6), (3, 4, 5, 6), (2, 3, 4, 5)),
((), (1,), 1),
((1,), (), 1),
((24,), (3, 8), 24),
((24,), (4, 6), 6),
((24,), (4, 3, 2), 6),
((24,), (4, 6, 1), 6),
((24,), (4, 6), (6, 12, 6)),
((64, 4), (8, 8, 4), (16, 2)),
((4, 64), (4, 8, 4, 2), (2, 16)),
((4, 8, 4, 2), (2, 1, 2, 32, 2), (2, 4, 2, 2)),
((4, 1, 4), (4, 4), (2, 1, 2)),
((0, 10), (0, 5, 2), (5, 5)),
((5, 0, 2), (0, 10), (5, 2, 2)),
((0,), (2, 0, 2), (4,)),
((2, 0, 2), (0,), (4, 4, 4)),
],
)
def test_reshape(original_shape, new_shape, chunks):
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
xr = x.reshape(new_shape)
ar = a.reshape(new_shape)
if a.shape == new_shape:
assert a is ar
assert_eq(xr, ar)
def test_reshape_exceptions():
x = np.random.randint(10, size=(5,))
a = from_array(x, chunks=(2,))
with pytest.raises(ValueError):
da.reshape(a, (100,))
def test_reshape_splat():
x = da.ones((5, 5), chunks=(2, 2))
assert_eq(x.reshape((25,)), x.reshape(25))
def test_reshape_fails_for_dask_only():
cases = [((3, 4), (4, 3), 2)]
for original_shape, new_shape, chunks in cases:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
assert x.reshape(new_shape).shape == new_shape
with pytest.raises(ValueError):
da.reshape(a, new_shape)
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 24)
assert_eq(x.reshape(new_shape), a.reshape(new_shape))
pytest.raises(ValueError, lambda: da.reshape(a, (-1, -1)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert_eq(d, np.full((3, 4), 2))
def test_map_blocks():
    """map_blocks applies a function per block, with optional name and new chunks."""
    x = np.arange(400).reshape((20, 20))
    d = from_array(x, chunks=(7, 7))
    e = d.map_blocks(inc, dtype=d.dtype)
    # Element-wise functions keep the chunk structure unchanged.
    assert d.chunks == e.chunks
    assert_eq(e, x + 1)
    # A user-supplied name becomes the key prefix; different funcs with the
    # same name must still get distinct (tokenized) names.
    e = d.map_blocks(inc, name="increment")
    assert e.name.startswith("increment-")
    assert d.map_blocks(inc, name="foo").name != d.map_blocks(dec, name="foo").name
    # When the function changes block shapes, explicit chunks= must be honored.
    d = from_array(x, chunks=(10, 10))
    e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
    assert e.chunks == ((5, 5), (5, 5))
    assert_eq(e, x[::2, ::2])
    d = from_array(x, chunks=(8, 8))
    e = d.map_blocks(
        lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)), dtype=d.dtype
    )
    assert_eq(e, x[::2, ::2])
def test_map_blocks2():
    """map_blocks passes block_id and forwards extra keyword arguments."""
    x = np.arange(10, dtype="i8")
    d = from_array(x, chunks=(2,))
    def func(block, block_id=None, c=0):
        # block_id is the block's index tuple within the chunk grid.
        return np.ones_like(block) * sum(block_id) + c
    out = d.map_blocks(func, dtype="i8")
    expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype="i8")
    assert_eq(out, expected)
    assert same_keys(d.map_blocks(func, dtype="i8"), out)
    out = d.map_blocks(func, dtype="i8", c=1)
    expected = expected + 1
    assert_eq(out, expected)
    assert same_keys(d.map_blocks(func, dtype="i8", c=1), out)
def test_map_blocks_block_info():
    """block_info describes every array argument (by position) plus the output (None key)."""
    x = da.arange(50, chunks=10)
    def func(a, b, c, block_info=None):
        for idx in [0, 2, None]:  # positions in args
            assert block_info[idx]["shape"] == (50,)
            assert block_info[idx]["num-chunks"] == (5,)
            start, stop = block_info[idx]["array-location"][0]
            assert stop - start == 10
            assert 0 <= start <= 40
            assert 10 <= stop <= 50
            assert 0 <= block_info[idx]["chunk-location"][0] <= 4
        assert block_info[None]["chunk-shape"] == (10,)
        assert block_info[None]["dtype"] == x.dtype
        return a + b + c
    z = da.map_blocks(func, x, 100, x + 1, dtype=x.dtype)
    assert_eq(z, x + x + 1 + 100)
def test_map_blocks_block_info_with_new_axis():
    """block_info stays consistent when map_blocks adds an axis (new_axis=)."""
    # https://github.com/dask/dask/issues/4298
    values = da.from_array(np.array(["a", "a", "b", "c"]), 2)
    def func(x, block_info=None):
        # Only the single input (key 0) and the output (key None) are described.
        assert block_info.keys() == {0, None}
        assert block_info[0]["shape"] == (4,)
        assert block_info[0]["num-chunks"] == (2,)
        # Output gains the new axis of length 3.
        assert block_info[None]["shape"] == (4, 3)
        assert block_info[None]["num-chunks"] == (2, 1)
        assert block_info[None]["chunk-shape"] == (2, 3)
        assert block_info[None]["dtype"] == np.dtype("f8")
        assert block_info[0]["chunk-location"] in {(0,), (1,)}
        if block_info[0]["chunk-location"] == (0,):
            assert block_info[0]["array-location"] == [(0, 2)]
            assert block_info[None]["chunk-location"] == (0, 0)
            assert block_info[None]["array-location"] == [(0, 2), (0, 3)]
        elif block_info[0]["chunk-location"] == (1,):
            assert block_info[0]["array-location"] == [(2, 4)]
            assert block_info[None]["chunk-location"] == (1, 0)
            assert block_info[None]["array-location"] == [(2, 4), (0, 3)]
        return np.ones((len(x), 3))
    z = values.map_blocks(func, chunks=((2, 2), 3), new_axis=1, dtype="f8")
    assert_eq(z, np.ones((4, 3), dtype="f8"))
def test_map_blocks_block_info_with_drop_axis():
    """block_info stays consistent when map_blocks removes an axis (drop_axis=)."""
    # https://github.com/dask/dask/issues/4584
    values = da.from_array(
        np.array(
            [[1, 2, 4], [8, 16, 32], [64, 128, 256], [1024, 2048, 4096]], dtype="u4"
        ),
        (2, 1),
    )
    def func(x, block_info=None):
        assert block_info.keys() == {0, None}
        assert block_info[0]["shape"] == (4, 3)
        # drop_axis concatenates along the dropped dimension, hence not (2, 3)
        assert block_info[0]["num-chunks"] == (2, 1)
        assert block_info[None]["shape"] == (4,)
        assert block_info[None]["num-chunks"] == (2,)
        assert block_info[None]["chunk-shape"] == (2,)
        assert block_info[None]["dtype"] == np.dtype("u4")
        assert block_info[0]["chunk-location"] in {(0, 0), (1, 0)}
        if block_info[0]["chunk-location"] == (0, 0):
            assert block_info[0]["array-location"] == [(0, 2), (0, 3)]
            assert block_info[None]["chunk-location"] == (0,)
            assert block_info[None]["array-location"] == [(0, 2)]
        elif block_info[0]["chunk-location"] == (1, 0):
            assert block_info[0]["array-location"] == [(2, 4), (0, 3)]
            assert block_info[None]["chunk-location"] == (1,)
            assert block_info[None]["array-location"] == [(2, 4)]
        return np.sum(x, axis=1, dtype="u4")
    z = values.map_blocks(func, drop_axis=1, dtype="u4")
    assert_eq(z, np.array([7, 56, 448, 7168], dtype="u4"))
def test_map_blocks_block_info_with_broadcast():
    """block_info reflects each input's own (pre-broadcast) geometry.

    Three inputs with different shapes — (3, 4), (6, 2), and (4,) — broadcast
    together to a (6, 4) output; each per-block call must see the correct
    per-input shape/chunk/location metadata plus the output description
    under the None key.
    """
    # Expected per-block info for the (3, 4) input, one dict per output block column.
    expected0 = [
        {
            "shape": (3, 4),
            "num-chunks": (1, 2),
            "array-location": [(0, 3), (0, 2)],
            "chunk-location": (0, 0),
        },
        {
            "shape": (3, 4),
            "num-chunks": (1, 2),
            "array-location": [(0, 3), (2, 4)],
            "chunk-location": (0, 1),
        },
    ]
    # Expected per-block info for the (6, 2) input.
    expected1 = [
        {
            "shape": (6, 2),
            "num-chunks": (2, 1),
            "array-location": [(0, 3), (0, 2)],
            "chunk-location": (0, 0),
        },
        {
            "shape": (6, 2),
            "num-chunks": (2, 1),
            "array-location": [(3, 6), (0, 2)],
            "chunk-location": (1, 0),
        },
    ]
    # Expected per-block info for the 1-D (4,) input.
    expected2 = [
        {
            "shape": (4,),
            "num-chunks": (2,),
            "array-location": [(0, 2)],
            "chunk-location": (0,),
        },
        {
            "shape": (4,),
            "num-chunks": (2,),
            "array-location": [(2, 4)],
            "chunk-location": (1,),
        },
    ]
    # One combined expectation per output block (keys 0/1/2 = inputs, None = output).
    expected = [
        {
            0: expected0[0],
            1: expected1[0],
            2: expected2[0],
            None: {
                "shape": (6, 4),
                "num-chunks": (2, 2),
                "dtype": np.float_,
                "chunk-shape": (3, 2),
                "array-location": [(0, 3), (0, 2)],
                "chunk-location": (0, 0),
            },
        },
        {
            0: expected0[1],
            1: expected1[0],
            2: expected2[1],
            None: {
                "shape": (6, 4),
                "num-chunks": (2, 2),
                "dtype": np.float_,
                "chunk-shape": (3, 2),
                "array-location": [(0, 3), (2, 4)],
                "chunk-location": (0, 1),
            },
        },
        {
            0: expected0[0],
            1: expected1[1],
            2: expected2[0],
            None: {
                "shape": (6, 4),
                "num-chunks": (2, 2),
                "dtype": np.float_,
                "chunk-shape": (3, 2),
                "array-location": [(3, 6), (0, 2)],
                "chunk-location": (1, 0),
            },
        },
        {
            0: expected0[1],
            1: expected1[1],
            2: expected2[1],
            None: {
                "shape": (6, 4),
                "num-chunks": (2, 2),
                "dtype": np.float_,
                "chunk-shape": (3, 2),
                "array-location": [(3, 6), (2, 4)],
                "chunk-location": (1, 1),
            },
        },
    ]
    def func(x, y, z, block_info=None):
        # Match this call against its expected record by output chunk-location.
        for info in expected:
            if block_info[None]["chunk-location"] == info[None]["chunk-location"]:
                assert block_info == info
                break
        else:
            assert False
        return x + y + z
    a = da.ones((3, 4), chunks=(3, 2))
    b = da.ones((6, 2), chunks=(3, 2))
    c = da.ones((4,), chunks=(2,))
    d = da.map_blocks(func, a, b, c, chunks=((3, 3), (2, 2)), dtype=a.dtype)
    assert d.chunks == ((3, 3), (2, 2))
    assert_eq(d, 3 * np.ones((6, 4)))
def test_map_blocks_with_constants():
    """Scalar (non-array) arguments may appear in any position for map_blocks."""
    d = da.arange(10, chunks=3)
    e = d.map_blocks(add, 100, dtype=d.dtype)
    assert_eq(e, np.arange(10) + 100)
    assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype), np.arange(10) - 10)
    assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype), 10 - np.arange(10))
def test_map_blocks_with_kwargs():
    """Keyword arguments are forwarded to the mapped function (here np.max)."""
    d = da.arange(10, chunks=5)
    result = d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype, chunks=(1,))
    assert_eq(result, np.array([4, 9]))
def test_map_blocks_infer_chunks_broadcast():
    """Output chunks are inferred by broadcasting the inputs' chunk structures."""
    dx = da.from_array([[1, 2, 3, 4]], chunks=((1,), (2, 2)))
    dy = da.from_array([[10, 20], [30, 40]], chunks=((1, 1), (2,)))
    result = da.map_blocks(lambda x, y: x + y, dx, dy)
    assert result.chunks == ((1, 1), (2, 2))
    assert_eq(result, np.array([[11, 22, 13, 24], [31, 42, 33, 44]]))
def test_map_blocks_with_chunks():
    """Passing chunks= explicitly works when it matches the true output chunks."""
    dx = da.ones((5, 3), chunks=(2, 2))
    dy = da.ones((5, 3), chunks=(2, 2))
    dz = da.map_blocks(np.add, dx, dy, chunks=dx.chunks)
    assert_eq(dz, np.ones((5, 3)) * 2)
def test_map_blocks_dtype_inference():
    """The output dtype is inferred by calling func on meta; failures mention dtype."""
    x = np.arange(50).reshape((5, 10))
    y = np.arange(10)
    dx = da.from_array(x, chunks=5)
    dy = da.from_array(y, chunks=5)
    def foo(x, *args, **kwargs):
        cast = kwargs.pop("cast", "i8")
        return (x + sum(args)).astype(cast)
    assert_eq(dx.map_blocks(foo, dy, 1), foo(dx, dy, 1))
    assert_eq(dx.map_blocks(foo, dy, 1, cast="f8"), foo(dx, dy, 1, cast="f8"))
    assert_eq(
        dx.map_blocks(foo, dy, 1, cast="f8", dtype="f8"),
        foo(dx, dy, 1, cast="f8", dtype="f8"),
    )
    def foo(x):
        raise RuntimeError("Woops")
    # When inference itself raises, the resulting error must point at dtype=.
    with pytest.raises(ValueError) as e:
        dx.map_blocks(foo)
    msg = str(e.value)
    assert "dtype" in msg
def test_map_blocks_infer_newaxis():
    """A leading np.newaxis added by func is reflected when chunks= declares it."""
    x = da.ones((5, 3), chunks=(2, 2))
    y = da.map_blocks(lambda x: x[None], x, chunks=((1,), (2, 2, 1), (2, 1)))
    assert_eq(y, da.ones((1, 5, 3)))
def test_map_blocks_no_array_args():
    """map_blocks can build an array from scratch using only block_info."""
    def func(dtype, block_info=None):
        loc = block_info[None]["array-location"]
        return np.arange(loc[0][0], loc[0][1], dtype=dtype)
    x = da.map_blocks(func, np.float32, chunks=((5, 3),), dtype=np.float32)
    assert x.chunks == ((5, 3),)
    assert_eq(x, np.arange(8, dtype=np.float32))
@pytest.mark.parametrize("func", [lambda x, y: x + y, lambda x, y, block_info: x + y])
def test_map_blocks_optimize_blockwise(func):
    """map_blocks layers should fuse with surrounding elementwise layers."""
    # Check that map_blocks layers can merge with elementwise layers
    base = [da.full((1,), i, chunks=1) for i in range(4)]
    a = base[0] + base[1]
    b = da.map_blocks(func, a, base[2], dtype=np.int8)
    c = b + base[3]
    dsk = c.__dask_graph__()
    optimized = optimize_blockwise(dsk)
    # Everything should be fused into a single layer.
    # If the lambda includes block_info, there will be two layers.
    assert len(optimized.layers) == len(dsk.layers) - 6
def test_repr():
    """repr shows the key name, shape, and dtype, and stays bounded in size."""
    d = da.ones((4, 4), chunks=(2, 2))
    assert key_split(d.name) in repr(d)
    assert str(d.shape) in repr(d)
    assert str(d.dtype) in repr(d)
    # Many chunks must not blow up the repr length.
    d = da.ones((4000, 4), chunks=(4, 2))
    assert len(str(d)) < 1000
def test_repr_meta():
    """repr advertises the chunk container type (numpy or e.g. sparse)."""
    d = da.ones((4, 4), chunks=(2, 2))
    assert "chunktype=numpy.ndarray" in repr(d)
    # Test non-numpy meta
    sparse = pytest.importorskip("sparse")
    s = d.map_blocks(sparse.COO)
    assert "chunktype=sparse.COO" in repr(s)
def test_repr_html_array_highlevelgraph():
    """The HighLevelGraph HTML repr (and each layer's) must be valid XML."""
    pytest.importorskip("jinja2")
    x = da.ones((9, 9), chunks=(3, 3)).T[0:4, 0:4]
    hg = x.dask
    assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None
    for layer in hg.layers.values():
        assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None
def test_slicing_with_ellipsis():
    """Ellipsis indexing behaves like NumPy's."""
    x = np.arange(256).reshape((4, 4, 4, 4))
    d = da.from_array(x, chunks=((2, 2, 2, 2)))
    assert_eq(d[..., 1], x[..., 1])
    assert_eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
    """Integer-array and boolean-mask indexing match NumPy semantics."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=((4, 4)))
    assert_eq(d[np.arange(8)], x)
    assert_eq(d[np.ones(8, dtype=bool)], x)
    assert_eq(d[np.array([1])], x[[1]])
    assert_eq(d[np.array([True, False, True] + [False] * 5)], x[[0, 2]])
def test_slicing_flexible_type():
    """Slicing works on string (flexible) dtypes."""
    a = np.array([["a", "b"], ["c", "d"]])
    b = da.from_array(a, 2)
    assert_eq(a[:, 0], b[:, 0])
def test_slicing_with_object_dtype():
    """Slicing an object-dtype array preserves its dtype."""
    # https://github.com/dask/dask/issues/6892
    d = da.from_array(np.array(["a", "b"], dtype=object), chunks=(1,))
    assert d.dtype == d[(0,)].dtype
def test_dtype():
    """Declared dtype matches computed dtype for elementwise ops and reductions."""
    d = da.ones((4, 4), chunks=(2, 2))
    assert d.dtype == d.compute().dtype
    assert (d * 1.0).dtype == (d + 1.0).compute().dtype
    assert d.sum().dtype == d.sum().compute().dtype  # no shape
def test_blockdims_from_blockshape():
    """blockdims_from_blockshape splits a shape into chunk tuples, with coercion."""
    assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
    pytest.raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
    # Floats and numpy integer scalars are accepted and coerced.
    assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,) * 10, (3,))
    assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)
def test_coerce():
    """bool/int/float/complex work on 0-d and 1-element arrays, fail otherwise."""
    d0 = da.from_array(np.array(1), chunks=(1,))
    d1 = da.from_array(np.array([1]), chunks=(1,))
    with dask.config.set(scheduler="sync"):
        for d in d0, d1:
            assert bool(d) is True
            assert int(d) == 1
            assert float(d) == 1.0
            assert complex(d) == complex(1)
    # Multi-element arrays cannot be coerced to a scalar.
    a2 = np.arange(2)
    d2 = da.from_array(a2, chunks=(2,))
    for func in (int, float, complex):
        pytest.raises(TypeError, lambda: func(d2))
def test_bool():
    """bool() of a multi-element array raises, like NumPy's ambiguity error."""
    arr = np.arange(100).reshape((10, 10))
    darr = da.from_array(arr, chunks=(10, 10))
    with pytest.raises(ValueError):
        bool(darr)
        bool(darr == darr)
def test_store_kwargs():
    """Extra keyword arguments to store() are forwarded to the scheduler."""
    d = da.ones((10, 10), chunks=(2, 2))
    a = d + 1
    called = [False]
    def get_func(*args, **kwargs):
        # Custom scheduler: assert the kwarg arrived, then delegate to dask.get.
        assert kwargs.pop("foo") == "test kwarg"
        r = dask.get(*args, **kwargs)
        called[0] = True
        return r
    called[0] = False
    at = np.zeros(shape=(10, 10))
    store([a], [at], scheduler=get_func, foo="test kwarg")
    assert called[0]
    called[0] = False
    at = np.zeros(shape=(10, 10))
    a.store(at, scheduler=get_func, foo="test kwarg")
    assert called[0]
    called[0] = False
    at = np.zeros(shape=(10, 10))
    store([a], [at], scheduler=get_func, return_stored=True, foo="test kwarg")
    assert called[0]
def test_store_delayed_target():
    """store() accepts Delayed objects as targets, materializing them lazily."""
    from dask.delayed import delayed
    d = da.ones((4, 4), chunks=(2, 2))
    a, b = d + 1, d + 2
    # empty buffers to be used as targets
    targs = {}
    def make_target(key):
        a = np.empty((4, 4))
        targs[key] = a
        return a
    # delayed calls to these targets
    atd = delayed(make_target)("at")
    btd = delayed(make_target)("bt")
    # test not keeping result
    st = store([a, b], [atd, btd])
    at = targs["at"]
    bt = targs["bt"]
    assert st is None
    assert_eq(at, a)
    assert_eq(bt, b)
    # test keeping result
    for st_compute in [False, True]:
        targs.clear()
        st = store([a, b], [atd, btd], return_stored=True, compute=st_compute)
        if st_compute:
            # Once computed, the returned arrays' graphs should have no deps left.
            assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in st)
        st = dask.compute(*st)
        at = targs["at"]
        bt = targs["bt"]
        assert st is not None
        assert isinstance(st, tuple)
        assert all([isinstance(v, np.ndarray) for v in st])
        assert_eq(at, a)
        assert_eq(bt, b)
        assert_eq(st[0], a)
        assert_eq(st[1], b)
    # Mismatched sources/targets must be rejected.
    pytest.raises(ValueError, lambda: store([a], [at, bt]))
    pytest.raises(ValueError, lambda: store(at, at))
    pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store():
    """Basic store(): writes into ndarray targets, returns None, validates args."""
    d = da.ones((4, 4), chunks=(2, 2))
    a, b = d + 1, d + 2
    at = np.empty(shape=(4, 4))
    bt = np.empty(shape=(4, 4))
    st = store([a, b], [at, bt])
    assert st is None
    assert (at == 2).all()
    assert (bt == 3).all()
    pytest.raises(ValueError, lambda: store([a], [at, bt]))
    pytest.raises(ValueError, lambda: store(at, at))
    pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store_regions():
    """store(regions=...) writes only into the selected sub-regions of the targets."""
    d = da.ones((4, 4, 4), dtype=int, chunks=(2, 2, 2))
    a, b = d + 1, d + 2
    a = a[:, 1:, :].astype(float)
    region = (slice(None, None, 2), slice(None), [1, 2, 4, 5])
    # Single region:
    at = np.zeros(shape=(8, 3, 6))
    bt = np.zeros(shape=(8, 4, 6))
    v = store([a, b], [at, bt], regions=region, compute=False)
    assert isinstance(v, Delayed)
    # Nothing written until compute().
    assert (at == 0).all() and (bt[region] == 0).all()
    assert all([ev is None for ev in v.compute()])
    assert (at[region] == 2).all() and (bt[region] == 3).all()
    # Only the region is written — the rest of the target stays zero.
    assert not (bt == 3).all() and not (bt == 0).all()
    assert not (at == 2).all() and not (at == 0).all()
    # Multiple regions:
    at = np.zeros(shape=(8, 3, 6))
    bt = np.zeros(shape=(8, 4, 6))
    v = store([a, b], [at, bt], regions=[region, region], compute=False)
    assert isinstance(v, Delayed)
    assert (at == 0).all() and (bt[region] == 0).all()
    assert all([ev is None for ev in v.compute()])
    assert (at[region] == 2).all() and (bt[region] == 3).all()
    assert not (bt == 3).all() and not (bt == 0).all()
    assert not (at == 2).all() and not (at == 0).all()
    # Single region (keep result):
    for st_compute in [False, True]:
        at = np.zeros(shape=(8, 3, 6))
        bt = np.zeros(shape=(8, 4, 6))
        v = store(
            [a, b], [at, bt], regions=region, compute=st_compute, return_stored=True
        )
        assert isinstance(v, tuple)
        assert all([isinstance(e, da.Array) for e in v])
        if st_compute:
            assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)
        else:
            assert (at == 0).all() and (bt[region] == 0).all()
        ar, br = v
        # Returned arrays mirror the sources' metadata.
        assert ar.dtype == a.dtype
        assert br.dtype == b.dtype
        assert ar.shape == a.shape
        assert br.shape == b.shape
        assert ar.chunks == a.chunks
        assert br.chunks == b.chunks
        ar, br = da.compute(ar, br)
        assert (at[region] == 2).all() and (bt[region] == 3).all()
        assert not (bt == 3).all() and not (bt == 0).all()
        assert not (at == 2).all() and not (at == 0).all()
        assert (br == 3).all()
        assert (ar == 2).all()
    # Multiple regions (keep result):
    for st_compute in [False, True]:
        at = np.zeros(shape=(8, 3, 6))
        bt = np.zeros(shape=(8, 4, 6))
        v = store(
            [a, b],
            [at, bt],
            regions=[region, region],
            compute=st_compute,
            return_stored=True,
        )
        assert isinstance(v, tuple)
        assert all([isinstance(e, da.Array) for e in v])
        if st_compute:
            assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)
        else:
            assert (at == 0).all() and (bt[region] == 0).all()
        ar, br = v
        assert ar.dtype == a.dtype
        assert br.dtype == b.dtype
        assert ar.shape == a.shape
        assert br.shape == b.shape
        assert ar.chunks == a.chunks
        assert br.chunks == b.chunks
        ar, br = da.compute(ar, br)
        assert (at[region] == 2).all() and (bt[region] == 3).all()
        assert not (bt == 3).all() and not (bt == 0).all()
        assert not (at == 2).all() and not (at == 0).all()
        assert (br == 3).all()
        assert (ar == 2).all()
def test_store_compute_false():
    """store(compute=False) returns a Delayed (or Arrays) and defers the writes."""
    d = da.ones((4, 4), chunks=(2, 2))
    a, b = d + 1, d + 2
    at = np.zeros(shape=(4, 4))
    bt = np.zeros(shape=(4, 4))
    v = store([a, b], [at, bt], compute=False)
    assert isinstance(v, Delayed)
    assert (at == 0).all() and (bt == 0).all()
    assert all([ev is None for ev in v.compute()])
    assert (at == 2).all() and (bt == 3).all()
    at = np.zeros(shape=(4, 4))
    bt = np.zeros(shape=(4, 4))
    dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)
    assert isinstance(dat, Array) and isinstance(dbt, Array)
    assert (at == 0).all() and (bt == 0).all()
    # Computing the returned arrays triggers the store as a side effect.
    assert (dat.compute() == at).all() and (dbt.compute() == bt).all()
    assert (at == 2).all() and (bt == 3).all()
def test_store_nocompute_regions():
    """Stores into different regions of one target get distinct Delayed keys."""
    x = da.ones(10, chunks=1)
    y = np.zeros((2, 10))
    d1 = da.store(x, y, regions=(0,), compute=False)
    d2 = da.store(x, y, regions=(1,), compute=False)
    assert d1.key != d2.key
class ThreadSafetyError(Exception):
    """Raised when a store object detects overlapping writes from two threads."""
class NonthreadSafeStore:
    """A write-only fake store that is deliberately NOT thread-safe.

    Each __setitem__ marks the store busy, sleeps briefly, then clears the
    flag; a second writer arriving while the flag is set triggers
    ThreadSafetyError.  Used to verify that store() serializes writes when
    given a lock.
    """

    def __init__(self):
        # True while a write is in flight.
        self.in_use = False

    def __setitem__(self, key, value):
        # Detect a concurrent writer before touching the flag.
        if self.in_use:
            raise ThreadSafetyError()
        self.in_use = True
        # Widen the race window so unsynchronized writers collide reliably.
        time.sleep(0.001)
        self.in_use = False
class ThreadSafeStore:
    """A write-only fake store that records its peak number of concurrent writers.

    __setitem__ bumps a counter, sleeps to widen the overlap window, then
    decrements; max_concurrent_uses captures the high-water mark so tests can
    check whether writes actually overlapped.
    """

    def __init__(self):
        # Writers currently inside __setitem__.
        self.concurrent_uses = 0
        # High-water mark of simultaneous writers.
        self.max_concurrent_uses = 0

    def __setitem__(self, key, value):
        self.concurrent_uses += 1
        if self.concurrent_uses > self.max_concurrent_uses:
            self.max_concurrent_uses = self.concurrent_uses
        time.sleep(0.01)
        self.concurrent_uses -= 1
class CounterLock:
    """A threading.Lock wrapper that counts acquire() and release() calls.

    Delegates both methods to a real lock so blocking semantics are
    preserved; the counters let tests assert exactly how many times the
    lock was taken.
    """

    def __init__(self, *args, **kwargs):
        self.acquire_count = 0
        self.release_count = 0
        # The real lock providing the synchronization behavior.
        self.lock = Lock(*args, **kwargs)

    def acquire(self, *args, **kwargs):
        # Count first, then delegate (the delegate call may block).
        self.acquire_count += 1
        return self.lock.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        self.release_count += 1
        return self.lock.release(*args, **kwargs)
def test_store_locks():
    """store() threads a user lock through the graph and uses it consistently."""
    _Lock = type(Lock())
    d = da.ones((10, 10), chunks=(2, 2))
    a, b = d + 1, d + 2
    at = np.zeros(shape=(10, 10))
    bt = np.zeros(shape=(10, 10))
    lock = Lock()
    v = store([a, b], [at, bt], compute=False, lock=lock)
    assert isinstance(v, Delayed)
    dsk = v.dask
    # Every lock embedded in the graph must be the single user-supplied one.
    locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))
    assert locks == set([lock])
    # Ensure same lock applies over multiple stores
    at = NonthreadSafeStore()
    v = store([a, b], [at, at], lock=lock, scheduler="threads", num_workers=10)
    assert v is None
    # Don't assume thread safety by default
    at = NonthreadSafeStore()
    assert store(a, at, scheduler="threads", num_workers=10) is None
    assert a.store(at, scheduler="threads", num_workers=10) is None
    # Ensure locks can be removed
    at = ThreadSafeStore()
    for i in range(10):
        st = a.store(at, lock=False, scheduler="threads", num_workers=10)
        assert st is None
        # With lock=False, at least one run should show overlapping writes.
        if at.max_concurrent_uses > 1:
            break
        if i == 9:
            assert False
    # Verify number of lock calls
    nchunks = np.sum([np.prod([len(c) for c in e.chunks]) for e in [a, b]])
    for c in (False, True):
        at = np.zeros(shape=(10, 10))
        bt = np.zeros(shape=(10, 10))
        lock = CounterLock()
        v = store([a, b], [at, bt], lock=lock, compute=c, return_stored=True)
        assert all(isinstance(e, Array) for e in v)
        da.compute(v)
        # When `return_stored=True` and `compute=False`,
        # the lock should be acquired only once for store and load steps
        # as they are fused together into one step.
        assert lock.acquire_count == lock.release_count
        if c:
            assert lock.acquire_count == 2 * nchunks
        else:
            assert lock.acquire_count == nchunks
def test_store_method_return():
    """Array.store return type follows the compute/return_stored combination."""
    d = da.ones((10, 10), chunks=(2, 2))
    a = d + 1
    for compute in [False, True]:
        for return_stored in [False, True]:
            at = np.zeros(shape=(10, 10))
            r = a.store(
                at, scheduler="threads", compute=compute, return_stored=return_stored
            )
            if return_stored:
                assert isinstance(r, Array)
            elif compute:
                assert r is None
            else:
                assert isinstance(r, Delayed)
@pytest.mark.xfail(reason="can't lock with multiprocessing")
def test_store_multiprocessing_lock():
    """Storing with the processes scheduler (locks aren't picklable — xfail)."""
    d = da.ones((10, 10), chunks=(2, 2))
    a = d + 1
    at = np.zeros(shape=(10, 10))
    st = a.store(at, scheduler="processes", num_workers=10)
    assert st is None
def test_to_hdf5():
    """to_hdf5 round-trips data and honors the chunks argument (tuple/None/explicit)."""
    h5py = pytest.importorskip("h5py")
    x = da.ones((4, 4), chunks=(2, 2))
    y = da.ones(4, chunks=2, dtype="i4")
    # Default: HDF5 chunks mirror the dask chunks.
    with tmpfile(".hdf5") as fn:
        x.to_hdf5(fn, "/x")
        with h5py.File(fn, mode="r+") as f:
            d = f["/x"]
            assert_eq(d[:], x)
            assert d.chunks == (2, 2)
    # chunks=None produces a contiguous (unchunked) dataset.
    with tmpfile(".hdf5") as fn:
        x.to_hdf5(fn, "/x", chunks=None)
        with h5py.File(fn, mode="r+") as f:
            d = f["/x"]
            assert_eq(d[:], x)
            assert d.chunks is None
    # Explicit chunks are passed straight through to h5py.
    with tmpfile(".hdf5") as fn:
        x.to_hdf5(fn, "/x", chunks=(1, 1))
        with h5py.File(fn, mode="r+") as f:
            d = f["/x"]
            assert_eq(d[:], x)
            assert d.chunks == (1, 1)
    # Module-level da.to_hdf5 writes several datasets in one call.
    with tmpfile(".hdf5") as fn:
        da.to_hdf5(fn, {"/x": x, "/y": y})
        with h5py.File(fn, mode="r+") as f:
            assert_eq(f["/x"][:], x)
            assert f["/x"].chunks == (2, 2)
            assert_eq(f["/y"][:], y)
            assert f["/y"].chunks == (2,)
def test_to_dask_dataframe():
    """1-D arrays become dask Series; 2-D arrays become dask DataFrames."""
    dd = pytest.importorskip("dask.dataframe")
    a = da.ones((4,), chunks=(2,))
    d = a.to_dask_dataframe()
    assert isinstance(d, dd.Series)
    a = da.ones((4, 4), chunks=(2, 2))
    d = a.to_dask_dataframe()
    assert isinstance(d, dd.DataFrame)
def test_np_array_with_zero_dimensions():
    """np.array() of a 0-d dask result equals the computed scalar."""
    d = da.ones((4, 4), chunks=(2, 2))
    assert_eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_dtype_complex():
    """Dtype propagation through arithmetic, reductions, ufuncs, and record fields."""
    x = np.arange(24).reshape((4, 6)).astype("f4")
    y = np.arange(24).reshape((4, 6)).astype("i8")
    z = np.arange(24).reshape((4, 6)).astype("i2")
    a = da.from_array(x, chunks=(2, 3))
    b = da.from_array(y, chunks=(2, 3))
    c = da.from_array(z, chunks=(2, 3))
    def assert_eq(a, b):
        # NOTE(review): this local shadow of assert_eq *returns* a bool rather
        # than asserting, so the comparisons below are not actually enforced —
        # presumably intentional dtype-only checking, but confirm.
        return isinstance(a, np.dtype) and isinstance(b, np.dtype) and str(a) == str(b)
    assert_eq(a.dtype, x.dtype)
    assert_eq(b.dtype, y.dtype)
    assert_eq((a + 1).dtype, (x + 1).dtype)
    assert_eq((a + b).dtype, (x + y).dtype)
    assert_eq(a.T.dtype, x.T.dtype)
    assert_eq(a[:3].dtype, x[:3].dtype)
    assert_eq((a.dot(b.T)).dtype, (x.dot(y.T)).dtype)
    assert_eq(stack([a, b]).dtype, np.vstack([x, y]).dtype)
    assert_eq(concatenate([a, b]).dtype, np.concatenate([x, y]).dtype)
    assert_eq(b.std().dtype, y.std().dtype)
    assert_eq(c.sum().dtype, z.sum().dtype)
    # NOTE(review): the next three compare an expression with itself —
    # possibly meant to compare against the NumPy counterparts; confirm.
    assert_eq(a.min().dtype, a.min().dtype)
    assert_eq(b.std().dtype, b.std().dtype)
    assert_eq(a.argmin(axis=0).dtype, a.argmin(axis=0).dtype)
    assert_eq(da.sin(c).dtype, np.sin(z).dtype)
    assert_eq(da.exp(b).dtype, np.exp(y).dtype)
    assert_eq(da.floor(a).dtype, np.floor(x).dtype)
    assert_eq(da.isnan(b).dtype, np.isnan(y).dtype)
    with contextlib.suppress(ImportError):
        assert da.isnull(b).dtype == "bool"
        assert da.notnull(b).dtype == "bool"
    # Structured (record) dtypes: field selection preserves field dtypes.
    x = np.array([("a", 1)], dtype=[("text", "S1"), ("numbers", "i4")])
    d = da.from_array(x, chunks=(1,))
    assert_eq(d["text"].dtype, x["text"].dtype)
    assert_eq(d[["numbers", "text"]].dtype, x[["numbers", "text"]].dtype)
def test_astype():
    """astype matches NumPy, is key-stable, rejects bad kwargs, and no-ops on same dtype."""
    x = np.ones((5, 5), dtype="f8")
    d = da.from_array(x, chunks=(2, 2))
    assert d.astype("i8").dtype == "i8"
    assert_eq(d.astype("i8"), x.astype("i8"))
    assert same_keys(d.astype("i8"), d.astype("i8"))
    # casting="safe" forbids f8 -> i8.
    with pytest.raises(TypeError):
        d.astype("i8", casting="safe")
    with pytest.raises(TypeError):
        d.astype("i8", not_a_real_kwarg="foo")
    # smoketest with kwargs
    assert_eq(d.astype("i8", copy=False), x.astype("i8", copy=False))
    # Check it's a noop
    assert d.astype("f8") is d
def test_arithmetic():
    """Exhaustive check that dask arithmetic/ufuncs match their NumPy equivalents.

    Covers binary ops (array-array, array-scalar, scalar-array), comparisons,
    bitwise ops, unary ops, and a broad sweep of da.* ufuncs against np.*.
    """
    x = np.arange(5).astype("f4") + 2
    y = np.arange(5).astype("i8") + 2
    z = np.arange(5).astype("i4") + 2
    a = da.from_array(x, chunks=(2,))
    b = da.from_array(y, chunks=(2,))
    c = da.from_array(z, chunks=(2,))
    # Array <op> array
    assert_eq(a + b, x + y)
    assert_eq(a * b, x * y)
    assert_eq(a - b, x - y)
    assert_eq(a / b, x / y)
    assert_eq(b & b, y & y)
    assert_eq(b | b, y | y)
    assert_eq(b ^ b, y ^ y)
    assert_eq(a // b, x // y)
    assert_eq(a ** b, x ** y)
    assert_eq(a % b, x % y)
    assert_eq(a > b, x > y)
    assert_eq(a < b, x < y)
    assert_eq(a >= b, x >= y)
    assert_eq(a <= b, x <= y)
    assert_eq(a == b, x == y)
    assert_eq(a != b, x != y)
    # Array <op> scalar
    assert_eq(a + 2, x + 2)
    assert_eq(a * 2, x * 2)
    assert_eq(a - 2, x - 2)
    assert_eq(a / 2, x / 2)
    assert_eq(b & True, y & True)
    assert_eq(b | True, y | True)
    assert_eq(b ^ True, y ^ True)
    assert_eq(a // 2, x // 2)
    assert_eq(a ** 2, x ** 2)
    assert_eq(a % 2, x % 2)
    assert_eq(a > 2, x > 2)
    assert_eq(a < 2, x < 2)
    assert_eq(a >= 2, x >= 2)
    assert_eq(a <= 2, x <= 2)
    assert_eq(a == 2, x == 2)
    assert_eq(a != 2, x != 2)
    # Scalar <op> array (reflected operators)
    assert_eq(2 + b, 2 + y)
    assert_eq(2 * b, 2 * y)
    assert_eq(2 - b, 2 - y)
    assert_eq(2 / b, 2 / y)
    assert_eq(True & b, True & y)
    assert_eq(True | b, True | y)
    assert_eq(True ^ b, True ^ y)
    assert_eq(2 // b, 2 // y)
    assert_eq(2 ** b, 2 ** y)
    assert_eq(2 % b, 2 % y)
    assert_eq(2 > b, 2 > y)
    assert_eq(2 < b, 2 < y)
    assert_eq(2 >= b, 2 >= y)
    assert_eq(2 <= b, 2 <= y)
    assert_eq(2 == b, 2 == y)
    assert_eq(2 != b, 2 != y)
    # Unary operators
    assert_eq(-a, -x)
    assert_eq(abs(a), abs(x))
    assert_eq(~(a == b), ~(x == y))
    assert_eq(~(a == b), ~(x == y))
    # Ufuncs
    assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))
    assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
    with pytest.warns(None):  # Overflow warning
        assert_eq(da.exp(b), np.exp(y))
    assert_eq(da.log(a), np.log(x))
    assert_eq(da.log10(a), np.log10(x))
    assert_eq(da.log1p(a), np.log1p(x))
    with pytest.warns(None):  # Overflow warning
        assert_eq(da.expm1(b), np.expm1(y))
    assert_eq(da.sqrt(a), np.sqrt(x))
    assert_eq(da.square(a), np.square(x))
    assert_eq(da.sin(a), np.sin(x))
    assert_eq(da.cos(b), np.cos(y))
    assert_eq(da.tan(a), np.tan(x))
    assert_eq(da.arcsin(b / 10), np.arcsin(y / 10))
    assert_eq(da.arccos(b / 10), np.arccos(y / 10))
    assert_eq(da.arctan(b / 10), np.arctan(y / 10))
    assert_eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))
    assert_eq(da.hypot(b, a), np.hypot(y, x))
    assert_eq(da.sinh(a), np.sinh(x))
    with pytest.warns(None):  # Overflow warning
        assert_eq(da.cosh(b), np.cosh(y))
    assert_eq(da.tanh(a), np.tanh(x))
    assert_eq(da.arcsinh(b * 10), np.arcsinh(y * 10))
    assert_eq(da.arccosh(b * 10), np.arccosh(y * 10))
    assert_eq(da.arctanh(b / 10), np.arctanh(y / 10))
    assert_eq(da.deg2rad(a), np.deg2rad(x))
    assert_eq(da.rad2deg(a), np.rad2deg(x))
    assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
    assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
    assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
    assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))
    # NOTE(review): the NumPy sides below receive dask arrays (`a`), not `x` —
    # presumably relying on np ufunc dispatch to dask; confirm intent.
    assert_eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))
    assert_eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))
    assert_eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))
    assert_eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))
    assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
    assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
    assert_eq(da.isfinite(a), np.isfinite(x))
    assert_eq(da.isinf(a), np.isinf(x))
    assert_eq(da.isnan(a), np.isnan(x))
    assert_eq(da.signbit(a - 3), np.signbit(x - 3))
    assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
    assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
    with pytest.warns(None):  # overflow warning
        assert_eq(da.ldexp(c, c), np.ldexp(z, z))
    assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
    assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))
    assert_eq(da.ceil(a), np.ceil(x))
    assert_eq(da.trunc(a / 2), np.trunc(x / 2))
    assert_eq(da.degrees(b), np.degrees(y))
    assert_eq(da.radians(a), np.radians(x))
    assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))
    assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))
    assert_eq(da.angle(a + 1j), np.angle(x + 1j))
    assert_eq(da.real(a + 1j), np.real(x + 1j))
    assert_eq((a + 1j).real, np.real(x + 1j))
    assert_eq(da.imag(a + 1j), np.imag(x + 1j))
    assert_eq((a + 1j).imag, np.imag(x + 1j))
    assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
    assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())
    assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
    assert_eq(b.clip(1, 4), y.clip(1, 4))
    assert_eq(da.fabs(b), np.fabs(y))
    assert_eq(da.sign(b - 2), np.sign(y - 2))
    assert_eq(da.absolute(b - 2), np.absolute(y - 2))
    assert_eq(da.absolute(b - 2 + 1j), np.absolute(y - 2 + 1j))
    # Multi-output ufuncs
    l1, l2 = da.frexp(a)
    r1, r2 = np.frexp(x)
    assert_eq(l1, r1)
    assert_eq(l2, r2)
    l1, l2 = da.modf(a)
    r1, r2 = np.modf(x)
    assert_eq(l1, r1)
    assert_eq(l2, r2)
    assert_eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
    """Identical elementwise expressions must produce identical graph keys."""
    a = da.from_array(np.arange(5, dtype="f4"), chunks=(2,))
    b = da.from_array(np.arange(5, dtype="f4"), chunks=(2,))
    assert same_keys(a + b, a + b)
    assert same_keys(a + 2, a + 2)
    assert same_keys(da.exp(a), da.exp(a))
    assert same_keys(da.exp(a, dtype="f8"), da.exp(a, dtype="f8"))
    assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
    """optimize() returns a dict graph containing all of the expression's keys."""
    x = np.arange(5).astype("f4")
    a = da.from_array(x, chunks=(2,))
    expr = a[1:4] + 1
    result = optimize(expr.dask, expr.__dask_keys__())
    assert isinstance(result, dict)
    assert all(key in result for key in expr.__dask_keys__())
def test_slicing_with_non_ndarrays():
    """from_array works with duck arrays whose __getitem__ returns lazy slices."""
    class ARangeSlice:
        # A lazy slice: only materializes via __array__.
        dtype = np.dtype("i8")
        ndim = 1
        def __init__(self, start, stop):
            self.start = start
            self.stop = stop
        def __array__(self):
            return np.arange(self.start, self.stop)
    class ARangeSlicable:
        # A sliceable duck array exposing dtype/ndim/shape and __getitem__.
        dtype = np.dtype("i8")
        ndim = 1
        def __init__(self, n):
            self.n = n
        @property
        def shape(self):
            return (self.n,)
        def __getitem__(self, key):
            return ARangeSlice(key[0].start, key[0].stop)
    x = da.from_array(ARangeSlicable(10), chunks=(4,))
    assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_getter():
    """getter coerces to ndarray by default (asarray=False preserves subclass)."""
    assert type(getter(np.matrix([[1]]), 0)) is np.ndarray
    assert type(getter(np.matrix([[1]]), 0, asarray=False)) is np.matrix
    assert_eq(getter([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
    assert_eq(getter(np.arange(5), (None, slice(None, None))), np.arange(5)[None, :])
def test_size():
    """size matches NumPy's and is a plain Python int."""
    x = da.ones((10, 2), chunks=(3, 1))
    assert x.size == np.array(x).size
    assert isinstance(x.size, int)
def test_nbytes():
    """nbytes matches the materialized array's nbytes."""
    x = da.ones((10, 2), chunks=(3, 1))
    assert x.nbytes == np.array(x).nbytes
def test_itemsize():
    """itemsize reflects the dtype's element size (f8 -> 8 bytes)."""
    x = da.ones((10, 2), chunks=(3, 1))
    assert x.itemsize == 8
def test_Array_normalizes_dtype():
    """A Python type passed as dtype= is normalized to np.dtype."""
    x = da.ones((3,), chunks=(1,), dtype=int)
    assert isinstance(x.dtype, np.dtype)
def test_from_array_with_lock():
    """lock=True creates one shared lock; an explicit lock is reused across arrays."""
    x = np.arange(10)
    d = da.from_array(x, chunks=5, lock=True)
    tasks = [v for k, v in d.dask.items() if k[0] == d.name]
    # Each getter task embeds the same lock object at position 4.
    assert hasattr(tasks[0][4], "acquire")
    assert len(set(task[4] for task in tasks)) == 1
    assert_eq(d, x)
    lock = Lock()
    e = da.from_array(x, chunks=5, lock=lock)
    f = da.from_array(x, chunks=5, lock=lock)
    assert_eq(e + f, x + x)
class MyArray:
    """A minimal duck-typed array wrapper.

    Exposes just the attributes from_array needs — dtype, shape, ndim —
    and delegates indexing to the wrapped array.
    """

    def __init__(self, x):
        # Keep the wrapped array and mirror its metadata.
        self.x = x
        self.dtype = x.dtype
        self.shape = x.shape
        self.ndim = len(x.shape)

    def __getitem__(self, i):
        # Indexing passes straight through to the wrapped array.
        return self.x[i]
@pytest.mark.parametrize(
"x,chunks",
[
(np.arange(25).reshape((5, 5)), (5, 5)),
(np.arange(25).reshape((5, 5)), -1),
(np.array([[1]]), 1),
(np.array(1), 1),
],
)
def test_from_array_tasks_always_call_getter(x, chunks):
dx = da.from_array(MyArray(x), chunks=chunks, asarray=False)
assert_eq(x, dx)
def test_from_array_ndarray_onechunk():
"""ndarray with a single chunk produces a minimal single key dict"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=-1)
assert_eq(x, dx)
assert len(dx.dask) == 1
assert dx.dask[dx.name, 0, 0] is x
def test_from_array_ndarray_getitem():
"""For ndarray, don't use getter / getter_nofancy; use the cleaner
operator.getitem"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
assert_eq(x, dx)
assert (dx.dask[dx.name, 0, 0] == np.array([[1, 2]])).all()
@pytest.mark.parametrize("x", [[1, 2], (1, 2), memoryview(b"abc")])
def test_from_array_list(x):
"""Lists, tuples, and memoryviews are automatically converted to ndarray"""
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(dx.dask[dx.name, 0], np.ndarray)
dx = da.from_array(x, chunks=1)
assert_eq(np.array(x), dx)
assert dx.dask[dx.name, 0][0] == x[0]
# On MacOS Python 3.9, the order of the np.ScalarType tuple randomly changes across
# interpreter restarts, thus causing pytest-xdist failures; setting PYTHONHASHSEED does
# not help
@pytest.mark.parametrize(
    "type_", sorted((t for t in np.ScalarType if t is not memoryview), key=str)
)
def test_from_array_scalar(type_):
    """Python and numpy scalars are automatically converted to ndarray"""
    if type_ == np.datetime64:
        # datetime64 cannot be constructed from an int; use an ISO date.
        x = np.datetime64("2000-01-01")
    else:
        x = type_(1)

    dx = da.from_array(x, chunks=-1)
    assert_eq(np.array(x), dx)
    # The stored graph value is a 0-d ndarray, keyed without a block index.
    assert isinstance(
        dx.dask[
            dx.name,
        ],
        np.ndarray,
    )
@pytest.mark.parametrize("asarray,cls", [(True, np.ndarray), (False, np.matrix)])
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_from_array_no_asarray(asarray, cls):
    """``asarray=False`` preserves the input's ndarray subclass (np.matrix)
    through construction and subsequent slicing."""

    def assert_chunks_are_of_type(x):
        # Materialize the chunks and verify their concrete type.
        chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())
        for c in concat(chunks):
            assert type(c) is cls

    x = np.matrix(np.arange(100).reshape((10, 10)))
    dx = da.from_array(x, chunks=(5, 5), asarray=asarray)
    assert_chunks_are_of_type(dx)
    assert_chunks_are_of_type(dx[0:5])
    assert_chunks_are_of_type(dx[0:5][:, 0])
def test_from_array_getitem():
    """A custom ``getitem`` callable is used verbatim in the graph tasks."""
    x = np.arange(10)

    def my_getitem(x, ind):
        return x[ind]

    y = da.from_array(x, chunks=(5,), getitem=my_getitem)

    for k, v in y.dask.items():
        if isinstance(v, tuple):
            assert v[0] is my_getitem

    assert_eq(x, y)
def test_from_array_minus_one():
    """``chunks=-1`` means a single chunk covering the whole axis."""
    x = np.arange(10)
    y = da.from_array(x, -1)
    assert y.chunks == ((10,),)
    assert_eq(x, y)
def test_from_array_copy():
    # Regression test for https://github.com/dask/dask/issues/3751
    # ``copy()`` must produce a distinct array whose computed result is
    # also a distinct object (no shared backing buffer identity).
    x = np.arange(10)
    y = da.from_array(x, -1)
    assert y.npartitions == 1
    y_c = y.copy()
    assert y is not y_c
    assert y.compute() is not y_c.compute()
def test_from_array_dask_array():
    """Passing an existing dask array to ``from_array`` is an error."""
    x = np.array([[1, 2], [3, 4]])
    dx = da.from_array(x, chunks=(1, 2))
    with pytest.raises(ValueError):
        da.from_array(dx)
def test_from_array_dask_collection_warns():
    """Objects exposing ``__dask_graph__`` trigger a UserWarning, since
    their graph would be silently discarded by ``from_array``."""

    class CustomCollection(np.ndarray):
        def __dask_graph__(self):
            return {"bar": 1}

    x = CustomCollection([1, 2, 3])
    with pytest.warns(UserWarning):
        da.from_array(x)

    # Ensure da.array warns too
    with pytest.warns(UserWarning):
        da.array(x)
def test_from_array_inline():
    """``inline_array=True`` embeds the array into the getter tasks instead
    of storing it under its own key."""

    class MyArray(np.ndarray):
        pass

    a = np.array([1, 2, 3]).view(MyArray)

    # Default: the array lives under its own key in the graph.
    dsk = dict(da.from_array(a, name="my-array").dask)
    assert dsk["my-array"] is a

    # Inlined: no standalone key; the array appears inside the chunk task.
    dsk = dict(da.from_array(a, name="my-array", inline_array=True).dask)
    assert "my-array" not in dsk
    assert a is dsk[("my-array", 0)][1]
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray(asarray):
    """asarray/asanyarray convert sequences; an existing dask array is
    returned unchanged (identity), and mixed lists are handled."""
    assert_eq(asarray([1, 2, 3]), np.asarray([1, 2, 3]))

    x = asarray([1, 2, 3])
    assert asarray(x) is x

    y = [x[0], 2, x[2]]
    assert_eq(asarray(y), x)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray_dask_dataframe(asarray):
    # https://github.com/dask/dask/issues/3885
    # Dask Series and DataFrame should convert via their ``.values``.
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd

    s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)
    result = asarray(s)
    expected = s.values
    assert_eq(result, expected)

    df = s.to_frame(name="s")
    result = asarray(df)
    expected = df.values
    assert_eq(result, expected)
@pytest.mark.parametrize("asarray", [da.asarray, da.asanyarray])
def test_asarray_h5py(asarray):
    """h5py datasets are wrapped lazily: the dataset object appears in the
    graph and is not eagerly converted to ndarray."""
    h5py = pytest.importorskip("h5py")

    with tmpfile(".hdf5") as fn:
        with h5py.File(fn, mode="a") as f:
            d = f.create_dataset("/x", shape=(2, 2), dtype=float)
            x = asarray(d)
            assert d in x.dask.values()
            assert not any(isinstance(v, np.ndarray) for v in x.dask.values())
def test_asarray_chunks():
    """``asarray`` honors the configured chunk-size limit when chunking."""
    with dask.config.set({"array.chunk-size": "100 B"}):
        x = np.ones(1000)
        d = da.asarray(x)
        assert d.npartitions > 1
@pytest.mark.filterwarnings("ignore:the matrix subclass")
def test_asanyarray():
    """``asanyarray`` preserves ndarray subclasses (np.matrix) and is a
    no-op on an existing dask array."""
    x = np.matrix([1, 2, 3])
    dx = da.asanyarray(x)
    assert dx.numblocks == (1, 1)
    chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())
    assert isinstance(chunks[0][0], np.matrix)
    assert da.asanyarray(dx) is dx
def test_asanyarray_dataframe():
    """``asanyarray`` converts dask DataFrame/Series like numpy does for
    pandas objects."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")

    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2)

    x = np.asanyarray(df)
    dx = da.asanyarray(ddf)
    assert isinstance(dx, da.Array)
    assert_eq(x, dx)

    x = np.asanyarray(df.x)
    dx = da.asanyarray(ddf.x)
    assert isinstance(dx, da.Array)
    assert_eq(x, dx)
def test_asanyarray_datetime64():
    """datetime64 arrays survive ``asanyarray`` conversion."""
    x = np.array(["2000-01-01"], dtype="datetime64")
    dx = da.asanyarray(x)
    assert isinstance(dx, da.Array)
    assert_eq(x, dx)
def test_from_func():
    """``from_func`` builds an array from a callable plus shape/dtype and
    produces deterministic keys for identical inputs."""
    x = np.arange(10)
    f = lambda n: n * x
    d = from_func(f, (10,), x.dtype, kwargs={"n": 2})

    assert d.shape == x.shape
    assert d.dtype == x.dtype
    assert_eq(d, 2 * x)
    assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={"n": 2}))
def test_concatenate3_2():
    """``concatenate3`` concatenates nested lists of blocks along multiple
    axes at once: 1-d, 2-d and 3-d cases."""
    # 1-d: simple tiling along the only axis.
    x = np.array([1, 2])
    assert_eq(concatenate3([x, x, x]), np.array([1, 2, 1, 2, 1, 2]))

    # 2-d: outer list is rows, inner list is columns.
    x = np.array([[1, 2]])
    assert (
        concatenate3([[x, x, x], [x, x, x]])
        == np.array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]])
    ).all()

    assert (
        concatenate3([[x, x], [x, x], [x, x]])
        == np.array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]])
    ).all()

    # 3-d: three levels of nesting map to the three axes.
    x = np.arange(12).reshape((2, 2, 3))
    assert_eq(
        concatenate3([[[x, x, x], [x, x, x]], [[x, x, x], [x, x, x]]]),
        np.array(
            [
                [
                    [0, 1, 2, 0, 1, 2, 0, 1, 2],
                    [3, 4, 5, 3, 4, 5, 3, 4, 5],
                    [0, 1, 2, 0, 1, 2, 0, 1, 2],
                    [3, 4, 5, 3, 4, 5, 3, 4, 5],
                ],
                [
                    [6, 7, 8, 6, 7, 8, 6, 7, 8],
                    [9, 10, 11, 9, 10, 11, 9, 10, 11],
                    [6, 7, 8, 6, 7, 8, 6, 7, 8],
                    [9, 10, 11, 9, 10, 11, 9, 10, 11],
                ],
                [
                    [0, 1, 2, 0, 1, 2, 0, 1, 2],
                    [3, 4, 5, 3, 4, 5, 3, 4, 5],
                    [0, 1, 2, 0, 1, 2, 0, 1, 2],
                    [3, 4, 5, 3, 4, 5, 3, 4, 5],
                ],
                [
                    [6, 7, 8, 6, 7, 8, 6, 7, 8],
                    [9, 10, 11, 9, 10, 11, 9, 10, 11],
                    [6, 7, 8, 6, 7, 8, 6, 7, 8],
                    [9, 10, 11, 9, 10, 11, 9, 10, 11],
                ],
            ]
        ),
    )
@pytest.mark.parametrize("one_d", [True, False])
@mock.patch.object(da.core, "_concatenate2", wraps=da.core._concatenate2)
def test_concatenate3_nep18_dispatching(mock_concatenate2, one_d):
    """NEP-18 duck arrays go through ``_concatenate2`` dispatch; plain
    ndarrays take the preallocate-and-fill fastpath."""
    x = EncapsulateNDArray(np.arange(10))
    concat = [x, x] if one_d else [[x[None]], [x[None]]]
    result = concatenate3(concat)
    assert type(result) is type(x)
    mock_concatenate2.assert_called()
    mock_concatenate2.reset_mock()

    # When all the inputs are supported by plain `np.concatenate`, we should take the concatenate3
    # fastpath of allocating the full array up front and writing blocks into it.
    concat = [x.arr, x.arr] if one_d else [[x.arr[None]], [x.arr[None]]]
    plain_np_result = concatenate3(concat)
    mock_concatenate2.assert_not_called()
    assert type(plain_np_result) is np.ndarray
def test_map_blocks3():
    """``map_blocks`` with multiple array arguments, including broadcasting
    a 1-d array against a 2-d one; keys are deterministic."""
    x = np.arange(10)
    y = np.arange(10) * 2

    d = da.from_array(x, chunks=5)
    e = da.from_array(y, chunks=5)

    assert_eq(
        da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype), x + 2 * y
    )

    z = np.arange(100).reshape((10, 10))
    f = da.from_array(z, chunks=5)

    func = lambda a, b: a + 2 * b
    res = da.core.map_blocks(func, d, f, dtype=d.dtype)
    assert_eq(res, x + 2 * z)
    assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)

    # Argument order determines which operand broadcasts.
    assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)
def test_from_array_with_missing_chunks():
    """``None`` in a chunks tuple means "whole axis as one chunk"."""
    x = np.random.randn(2, 4, 3)
    d = da.from_array(x, chunks=(None, 2, None))
    assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_normalize_chunks():
    """``normalize_chunks`` accepts ints, tuples, dicts, -1/None sentinels
    and 'auto', and rejects chunkings inconsistent with the shape."""
    assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
    assert normalize_chunks(((3, 3), (8,)), (6, 8)) == ((3, 3), (8,))
    assert normalize_chunks((4, 5), (9,)) == ((4, 5),)
    assert normalize_chunks((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))
    # -1 / None both mean "single chunk along this axis".
    assert normalize_chunks(-1, (5, 5)) == ((5,), (5,))
    assert normalize_chunks((3, -1), (5, 5)) == ((3, 2), (5,))
    assert normalize_chunks((3, None), (5, 5)) == ((3, 2), (5,))
    # Dict form: per-axis overrides; unlisted axes stay whole.
    assert normalize_chunks({0: 3}, (5, 5)) == ((3, 2), (5,))
    assert normalize_chunks([[2, 2], [3, 3]]) == ((2, 2), (3, 3))
    assert normalize_chunks(10, (30, 5)) == ((10, 10, 10), (5,))
    # Degenerate shapes.
    assert normalize_chunks((), (0, 0)) == ((0,), (0,))
    assert normalize_chunks(-1, (0, 3)) == ((0,), (3,))
    # 'auto' sizing respects the byte limit and dtype size.
    assert normalize_chunks("auto", shape=(20,), limit=5, dtype="uint8") == (
        (5, 5, 5, 5),
    )
    assert normalize_chunks(("auto", None), (5, 5), dtype=int) == ((5,), (5,))

    with pytest.raises(ValueError):
        normalize_chunks(((10,),), (11,))
    with pytest.raises(ValueError):
        normalize_chunks(((5,), (5,)), (5,))
def test_align_chunks_to_previous_chunks():
    """'auto' chunking snaps new chunk sizes to multiples of
    ``previous_chunks`` where the byte limit allows."""
    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(512,), limit="600 B", dtype=np.uint8
    )
    assert chunks == ((512, 512, 512, 2000 - 512 * 3),)

    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(128,), limit="600 B", dtype=np.uint8
    )
    assert chunks == ((512, 512, 512, 2000 - 512 * 3),)

    chunks = normalize_chunks(
        "auto", shape=(2000,), previous_chunks=(512,), limit="1200 B", dtype=np.uint8
    )
    assert chunks == ((1024, 2000 - 1024),)

    # Multi-dimensional: each axis aligns to its previous chunking; only
    # the trailing (remainder) chunk may break the multiple.
    chunks = normalize_chunks(
        "auto",
        shape=(3, 10211, 10376),
        previous_chunks=(1, 512, 512),
        limit="1MiB",
        dtype=np.float32,
    )
    assert chunks[0] == (1, 1, 1)
    assert all(c % 512 == 0 for c in chunks[1][:-1])
    assert all(c % 512 == 0 for c in chunks[2][:-1])
def test_raise_on_no_chunks():
    """Constructing an ``Array`` with ``chunks=None`` raises a ValueError
    whose message points users at the dask documentation."""
    x = da.ones(6, chunks=3)
    # Idiomatic pytest.raises instead of try/assert False/except: the
    # context manager itself fails the test if no exception is raised.
    with pytest.raises(ValueError) as excinfo:
        Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
    assert "dask" in str(excinfo.value)
    assert ".org" in str(excinfo.value)
def test_chunks_is_immutable():
    """Assigning to ``.chunks`` raises a TypeError that suggests
    ``rechunk`` instead."""
    x = da.ones(6, chunks=3)
    # Idiomatic pytest.raises instead of try/assert False/except.
    with pytest.raises(TypeError) as excinfo:
        x.chunks = 2
    assert "rechunk(2)" in str(excinfo.value)
def test_raise_on_bad_kwargs():
    """Unknown keyword arguments to ufunc-like functions raise a TypeError
    naming both the function and the bad keyword.

    Bug fix: the original ``try/except`` had no ``assert False`` fallback,
    so the test silently passed if no exception was raised at all.
    ``pytest.raises`` fails in that case.
    """
    x = da.ones(5, chunks=3)
    with pytest.raises(TypeError) as excinfo:
        da.minimum(x, foo=None)
    assert "minimum" in str(excinfo.value)
    assert "foo" in str(excinfo.value)
def test_long_slice():
    """Slicing many tiny chunks (chunksize 1) still produces the right result."""
    x = np.arange(10000)
    d = da.from_array(x, chunks=1)
    assert_eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
    """``None`` (newaxis) indexing works on arrays backed by h5py datasets
    and produces deterministic keys."""
    h5py = pytest.importorskip("h5py")

    with tmpfile("h5") as fn:
        with h5py.File(fn, mode="a") as f:
            x = f.create_dataset("/x", shape=(10, 10), dtype="f8")
            d = da.from_array(x, chunks=(5, 5))
            assert d[None, :, :].compute(scheduler="sync").shape == (1, 10, 10)
            assert d[:, None, :].compute(scheduler="sync").shape == (10, 1, 10)
            assert d[:, :, None].compute(scheduler="sync").shape == (10, 10, 1)
            assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
    """``x[...]`` is a no-op slice."""
    assert_eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
    """``vindex`` pointwise indexing matches numpy fancy indexing and has
    deterministic keys."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))

    result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
    assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])

    result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
    assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
    assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
    """``vindex`` mixing point lists with full slices: the pointed axes are
    moved to the front, matching numpy's advanced-indexing transpose."""
    from dask.array.core import _get_axis, _vindex_transpose

    x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))
    d = da.from_array(x, chunks=(2, 3, 3, 4))

    # ``None`` entries below stand for full slices (slice(None)).
    inds = [
        [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
        [[1, 2, 3], None, [4, 3, 2], None],
        [[1, 2, 3], [3, 2, 1]],
        [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
        [[], [], [], None],
        [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
        [None, None, [1, 2, 3], [4, 3, 2]],
        [None, [0, 2, 3], None, [0, 3, 2]],
    ]

    for ind in inds:
        slc = [
            i if isinstance(i, (np.ndarray, list)) else slice(None, None) for i in ind
        ]
        result = d.vindex[tuple(slc)]

        # Rotate the expected result accordingly
        axis = _get_axis(ind)
        expected = _vindex_transpose(x[tuple(slc)], axis)
        assert_eq(result, expected)

        # Always have the first axis be the length of the points
        k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
        assert result.shape[0] == k
def test_slice_with_floats():
    """Float indices/bounds raise IndexError, like numpy."""
    d = da.ones((5,), chunks=(3,))
    with pytest.raises(IndexError):
        d[1.5]
    with pytest.raises(IndexError):
        d[0:1.5]
    with pytest.raises(IndexError):
        d[[1, 1.5]]
def test_slice_with_integer_types():
    """Fancy indexing works with unsigned and signed integer index dtypes."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=5)
    inds = np.array([0, 3, 6], dtype="u8")
    assert_eq(dx[inds], x[inds])
    assert_eq(dx[inds.astype("u4")], x[inds.astype("u4")])

    inds = np.array([0, 3, 6], dtype=np.int64)
    assert_eq(dx[inds], x[inds])
    assert_eq(dx[inds.astype("u4")], x[inds.astype("u4")])
def test_index_with_integer_types():
    """Scalar indexing works with both Python int and numpy int64."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=5)
    inds = int(3)
    assert_eq(dx[inds], x[inds])

    inds = np.int64(3)
    assert_eq(dx[inds], x[inds])
def test_vindex_basic():
    """vindex cases where basic and advanced indexing coincide."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))

    # cases where basic and advanced indexing coincide
    result = d.vindex[0]
    assert_eq(result, x[0])

    result = d.vindex[0, 1]
    assert_eq(result, x[0, 1])

    result = d.vindex[[0, 1], ::-1]  # slices last
    assert_eq(result, x[:2, ::-1])
def test_vindex_nd():
    """vindex with n-dimensional index arrays, including broadcasting
    patterns that reproduce the identity and the transpose."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(3, 4))

    result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]
    assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])

    result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]
    assert_eq(result, x)

    result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]
    assert_eq(result, x.T)
def test_vindex_negative():
    """vindex supports negative point indices."""
    x = np.arange(10)
    d = da.from_array(x, chunks=(5, 5))

    result = d.vindex[np.array([0, -1])]
    assert_eq(result, x[np.array([0, -1])])
def test_vindex_errors():
    """vindex rejects newaxis, mismatched index lengths, boolean masks,
    and out-of-bounds indices (both positive and negative)."""
    d = da.ones((5, 5, 5), chunks=(3, 3, 3))
    pytest.raises(IndexError, lambda: d.vindex[np.newaxis])
    pytest.raises(IndexError, lambda: d.vindex[[1, 2], [1, 2, 3]])
    pytest.raises(IndexError, lambda: d.vindex[[True] * 5])
    pytest.raises(IndexError, lambda: d.vindex[[0], [5]])
    pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])
def test_vindex_merge():
    """``_vindex_merge`` reassembles per-chunk results into the order given
    by the location lists."""
    from dask.array.core import _vindex_merge

    locations = [1], [2, 0]
    values = [np.array([[1, 2, 3]]), np.array([[10, 20, 30], [40, 50, 60]])]

    assert (
        _vindex_merge(locations, values)
        == np.array([[40, 50, 60], [1, 2, 3], [10, 20, 30]])
    ).all()
def test_vindex_identity():
    """Full slices through vindex return the array itself; any partial
    slice raises IndexError (vindex only supports pointwise indexing)."""
    rng = da.random.RandomState(42)
    a, b = 10, 20

    x = rng.random(a, chunks=a // 2)
    assert x is x.vindex[:]
    assert x is x.vindex[:a]
    pytest.raises(IndexError, lambda: x.vindex[: a - 1])
    pytest.raises(IndexError, lambda: x.vindex[1:])
    pytest.raises(IndexError, lambda: x.vindex[0:a:2])

    x = rng.random((a, b), chunks=(a // 2, b // 2))
    assert x is x.vindex[:, :]
    assert x is x.vindex[:a, :b]
    pytest.raises(IndexError, lambda: x.vindex[:, : b - 1])
    pytest.raises(IndexError, lambda: x.vindex[:, 1:])
    pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])
def test_empty_array():
    """Zero-length ranges work."""
    assert_eq(np.arange(0), da.arange(0, chunks=5))
def test_memmap():
    """``store`` can write into a numpy memmap target, which then
    round-trips through np.save/np.load."""
    with tmpfile("npy") as fn_1:
        with tmpfile("npy") as fn_2:
            try:
                x = da.arange(100, chunks=15)
                target = np.memmap(fn_1, shape=x.shape, mode="w+", dtype=x.dtype)

                x.store(target)

                assert_eq(target, x, check_type=False)

                np.save(fn_2, target)

                assert_eq(np.load(fn_2, mmap_mode="r"), x, check_type=False)
            finally:
                # Close the mmap explicitly so the temp file can be removed
                # (required on Windows).
                target._mmap.close()
def test_to_npy_stack():
    """Round-trip through to_npy_stack/from_npy_stack: one .npy file per
    chunk along the chosen axis."""
    x = np.arange(5 * 10 * 10).reshape((5, 10, 10))
    d = da.from_array(x, chunks=(2, 4, 4))

    with tmpdir() as dirname:
        stackdir = os.path.join(dirname, "test")
        da.to_npy_stack(stackdir, d, axis=0)
        assert os.path.exists(os.path.join(stackdir, "0.npy"))
        # Second file holds the second chunk along axis 0 (rows 2:4).
        assert (np.load(os.path.join(stackdir, "1.npy")) == x[2:4]).all()

        e = da.from_npy_stack(stackdir)
        assert_eq(d, e)
def test_view():
    """``view`` reinterprets the dtype like numpy; it fails when a chunk's
    byte count is not divisible by the new itemsize, or on a bad order."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(2, 3))

    assert_eq(x.view(), d.view())
    assert_eq(x.view("i4"), d.view("i4"))
    assert_eq(x.view("i2"), d.view("i2"))
    assert all(isinstance(s, int) for s in d.shape)

    x = np.arange(8, dtype="i1")
    d = da.from_array(x, chunks=(4,))
    assert_eq(x.view("i4"), d.view("i4"))

    with pytest.raises(ValueError):
        # Chunk of 3 int8 bytes cannot be viewed as int32.
        x = np.arange(8, dtype="i1")
        d = da.from_array(x, chunks=(3,))
        d.view("i4")

    with pytest.raises(ValueError):
        d.view("i4", order="asdf")
def test_view_fortran():
    """``view(order='F')`` matches numpy's transpose-view-transpose trick."""
    x = np.asfortranarray(np.arange(64).reshape((8, 8)))
    d = da.from_array(x, chunks=(2, 3))
    assert_eq(x.T.view("i4").T, d.view("i4", order="F"))
    assert_eq(x.T.view("i2").T, d.view("i2", order="F"))
def test_h5py_tokenize():
    """Distinct h5py datasets (different files/contents) tokenize differently."""
    h5py = pytest.importorskip("h5py")
    with tmpfile("hdf5") as fn1:
        with tmpfile("hdf5") as fn2:
            f = h5py.File(fn1, mode="a")
            g = h5py.File(fn2, mode="a")

            f["x"] = np.arange(10).astype(float)
            g["x"] = np.ones(10).astype(float)

            x1 = f["x"]
            x2 = g["x"]

            assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
    """``map_blocks`` with ``drop_axis``/``new_axis``: dimension removal,
    insertion, their combination, and validation of bad ``chunks``."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0, dtype=d.dtype)
    assert e.chunks == ((4, 4),)
    assert_eq(e, x.sum(axis=0))

    # Provided chunks have wrong shape
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=0), chunks=(), drop_axis=0)

    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)

    # Can't drop an axis whose chunking is not trivial (multiple chunks).
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b.sum(axis=1), chunks=((3, 4),), drop_axis=1)

    d = da.from_array(x, chunks=(4, 8))
    e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)
    assert e.chunks == ((4, 3),)
    assert_eq(e, x.sum(axis=1))

    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    e = d.map_blocks(
        lambda b: b[None, :, :, None],
        chunks=(1, 4, 4, 1),
        new_axis=[0, 3],
        dtype=d.dtype,
    )
    assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
    assert_eq(e, x[None, :, :, None])

    # new_axis without explicit chunks: size-1 axes are inferred.
    e = d.map_blocks(lambda b: b[None, :, :, None], new_axis=[0, 3], dtype=d.dtype)
    assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
    assert_eq(e, x[None, :, :, None])

    # Adding axis with a gap
    with pytest.raises(ValueError):
        d.map_blocks(lambda b: b, new_axis=(3, 4))

    # Both new_axis and drop_axis
    d = da.from_array(x, chunks=(8, 4))
    e = d.map_blocks(
        lambda b: b.sum(axis=0)[:, None, None],
        drop_axis=0,
        new_axis=(1, 2),
        dtype=d.dtype,
    )
    assert e.chunks == ((4, 4), (1,), (1,))
    assert_eq(e, x.sum(axis=0)[:, None, None])

    d = da.from_array(x, chunks=(4, 8))
    e = d.map_blocks(
        lambda b: b.sum(axis=1)[:, None, None],
        drop_axis=1,
        new_axis=(1, 2),
        dtype=d.dtype,
    )
    assert e.chunks == ((4, 4), (1,), (1,))
    assert_eq(e, x.sum(axis=1)[:, None, None])
def test_map_blocks_with_negative_drop_axis():
    """Negative ``drop_axis`` values are equivalent to their positive
    counterparts."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    for drop_axis in [0, -2]:
        # test with equivalent positive and negative drop_axis
        e = d.map_blocks(
            lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
        )
        assert e.chunks == ((4, 4),)
        assert_eq(e, x.sum(axis=0))
def test_map_blocks_with_invalid_drop_axis():
    """Out-of-range ``drop_axis`` (>= ndim or < -ndim) raises ValueError."""
    x = np.arange(56).reshape((7, 8))
    d = da.from_array(x, chunks=(7, 4))

    for drop_axis in [x.ndim, -x.ndim - 1]:
        with pytest.raises(ValueError):
            d.map_blocks(
                lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype
            )
def test_map_blocks_with_changed_dimension_and_broadcast_chunks():
    # https://github.com/dask/dask/issues/4299
    # Explicit ``chunks=`` lets a smaller array broadcast against a larger
    # one inside map_blocks.
    a = da.from_array([1, 2, 3], 3)
    b = da.from_array(np.array([0, 1, 2, 0, 1, 2]), chunks=3)
    result = da.map_blocks(operator.add, a, b, chunks=b.chunks)
    expected = da.from_array(np.array([1, 3, 5, 1, 3, 5]), chunks=3)
    assert_eq(result, expected)
def test_broadcast_chunks():
    """``broadcast_chunks`` combines per-axis chunk tuples with numpy-style
    broadcasting; size-1 axes stretch, NaN chunks propagate, and
    incompatible chunkings raise ValueError."""
    assert broadcast_chunks() == ()

    assert broadcast_chunks(((2, 3),)) == ((2, 3),)

    assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)

    # Missing leading dims are filled in, in either argument order.
    a = ((10, 10, 10), (5, 5))
    b = ((5, 5),)
    assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))
    assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5))

    # Size-1 axes broadcast against larger ones.
    a = ((10, 10, 10), (5, 5))
    b = ((1,), (5, 5))
    assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))

    # Same total length but different chunking is not broadcastable.
    a = ((10, 10, 10), (5, 5))
    b = ((3, 3), (5, 5))
    with pytest.raises(ValueError):
        broadcast_chunks(a, b)

    a = ((1,), (5, 5))
    b = ((1,), (5, 5))
    assert broadcast_chunks(a, b) == a

    # Unknown (NaN) chunk sizes win over a broadcastable size-1 axis.
    a = ((1,), (np.nan, np.nan, np.nan))
    b = ((3, 3), (1,))
    r = broadcast_chunks(a, b)
    assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)

    a = ((3, 3), (1,))
    b = ((1,), (np.nan, np.nan, np.nan))
    r = broadcast_chunks(a, b)
    assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)

    # NaN chunks against a known, non-unit chunking cannot broadcast.
    a = ((3, 3), (5, 5))
    b = ((1,), (np.nan, np.nan, np.nan))
    with pytest.raises(ValueError):
        broadcast_chunks(a, b)
def test_chunks_error():
    """Chunks tuple with wrong dimensionality raises ValueError."""
    x = np.ones((10, 10))
    with pytest.raises(ValueError):
        da.from_array(x, chunks=(5,))
def test_array_compute_forward_kwargs():
    """Unknown keywords to ``compute`` are forwarded to the scheduler
    without raising."""
    x = da.arange(10, chunks=2).sum()
    x.compute(bogus_keyword=10)
def test_dont_fuse_outputs():
    """Graph optimization must not fuse away a key that is itself an
    output chunk of the array."""
    dsk = {("x", 0): np.array([1, 2]), ("x", 1): (inc, ("x", 0))}
    a = da.Array(dsk, "x", chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)
    assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))
def test_dont_dealias_outputs():
    """An output key that aliases another output key must survive
    optimization."""
    dsk = {
        ("x", 0, 0): np.ones((2, 2)),
        ("x", 0, 1): np.ones((2, 2)),
        ("x", 1, 0): np.ones((2, 2)),
        ("x", 1, 1): ("x", 0, 0),  # alias of another output block
    }
    a = da.Array(dsk, "x", chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)
    assert_eq(a, np.ones((4, 4)))
def test_timedelta_op():
    """Dividing a timedelta64 array by a timedelta64 scalar works."""
    x = np.array([np.timedelta64(10, "h")])
    y = np.timedelta64(1, "h")
    a = da.from_array(x, chunks=(1,)) / y
    assert a.compute() == x / y
def test_to_delayed():
    """``to_delayed`` returns a nested list of Delayed chunks; 0-d arrays
    are keyed by the empty tuple."""
    x = da.random.random((4, 4), chunks=(2, 2))
    y = x + 10
    [[a, b], [c, d]] = y.to_delayed()
    assert_eq(a.compute(), y[:2, :2])

    s = 2
    x = da.from_array(np.array(s), chunks=0)
    a = x.to_delayed()[tuple()]
    assert a.compute() == s
def test_to_delayed_optimize_graph():
    """``to_delayed(optimize_graph=True)`` fuses chained getitems into one
    task; ``optimize_graph=False`` keeps the raw graph. Results agree."""
    x = da.ones((4, 4), chunks=(2, 2))
    y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]

    # optimizations
    d = y.to_delayed().flatten().tolist()[0]
    assert len([k for k in d.dask if k[0].startswith("getitem")]) == 1

    # no optimizations
    d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]
    assert dict(d2.dask) == dict(y.dask)

    assert (d.compute() == d2.compute()).all()
def test_cumulative():
    """cumsum/cumprod and their NaN-aware variants across 1-d, 2-d and 3-d
    arrays, all axes (positive and negative), and invalid-axis errors."""
    # 1-d, no NaNs.
    x = da.arange(20, chunks=5)
    assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())
    assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())
    assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))
    assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))

    # 1-d with NaNs sprinkled in (seeded for reproducibility).
    a = np.random.random(20)
    rs = np.random.RandomState(0)
    a[rs.rand(*a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=5)
    assert_eq(da.nancumsum(x, axis=0), nancumsum(a))
    assert_eq(da.nancumprod(x, axis=0), nancumprod(a))

    # 2-d, both axes.
    a = np.random.random((20, 24))
    x = da.from_array(a, chunks=(6, 5))
    assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))
    assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))
    assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))
    assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))

    assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
    assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
    assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
    assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))

    # 2-d with NaNs.
    a = np.random.random((20, 24))
    rs = np.random.RandomState(0)
    a[rs.rand(*a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=(6, 5))
    assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
    assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
    assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
    assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))

    # 3-d, all axes including negative ones.
    a = np.random.random((20, 24, 13))
    x = da.from_array(a, chunks=(6, 5, 4))
    for axis in [0, 1, 2, -1, -2, -3]:
        assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))
        assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))
        assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
        assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))

    # 3-d with NaNs.
    a = np.random.random((20, 24, 13))
    rs = np.random.RandomState(0)
    a[rs.rand(*a.shape) < 0.5] = np.nan
    x = da.from_array(a, chunks=(6, 5, 4))
    for axis in [0, 1, 2, -1, -2, -3]:
        assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
        assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))

    # Out-of-range axes raise.
    with pytest.raises(ValueError):
        x.cumsum(axis=3)

    with pytest.raises(ValueError):
        x.cumsum(axis=-4)
def test_from_delayed():
    """``from_delayed`` builds a single-chunk Array from a Delayed value."""
    v = delayed(np.ones)((5, 3))
    x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)
    assert isinstance(x, Array)
    assert_eq(x, np.ones((5, 3)))
def test_from_delayed_meta():
    """``from_delayed`` accepts ``meta=`` in place of ``dtype``."""
    v = delayed(np.ones)((5, 3))
    x = from_delayed(v, shape=(5, 3), meta=np.ones(0))
    assert isinstance(x, Array)
    assert isinstance(x._meta, np.ndarray)
def test_A_property():
    """``.A`` (numpy.matrix compatibility) is the identity for dask arrays."""
    x = da.ones(5, chunks=(2,))
    assert x.A is x
def test_copy_mutate():
    """Mutating an array (via setitem) does not affect prior copies or
    deepcopies; deepcopy registers in the memo like regular objects."""
    x = da.arange(5, chunks=(2,))
    y = x.copy()
    memo = {}
    y2 = copy.deepcopy(x, memo=memo)
    x[x % 2 == 0] = -1

    xx = np.arange(5)
    xx[xx % 2 == 0] = -1
    assert_eq(x, xx)

    assert_eq(y, np.arange(5))
    assert_eq(y2, np.arange(5))
    assert memo[id(x)] is y2
def test_npartitions():
    """``npartitions`` is the product of the number of chunks per axis."""
    assert da.ones(5, chunks=(2,)).npartitions == 3
    assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6
def test_astype_gh1151():
    # Regression test for https://github.com/dask/dask/issues/1151
    a = np.arange(5).astype(np.int32)
    b = da.from_array(a, (1,))
    assert_eq(a.astype(np.int16), b.astype(np.int16))
def test_elemwise_name():
    """Elementwise ops prefix the result name with the operation name."""
    assert (da.ones(5, chunks=2) + 1).name.startswith("add-")
def test_map_blocks_name():
    """``map_blocks`` prefixes the result name with the function's name."""
    assert da.ones(5, chunks=2).map_blocks(inc).name.startswith("inc-")
def test_from_array_names():
    """Each key prefix in a from_array graph appears once per chunk."""
    pytest.importorskip("distributed")

    x = np.ones(10)
    d = da.from_array(x, chunks=2)

    names = countby(key_split, d.dask)
    assert set(names.values()) == set([5])
@pytest.mark.parametrize(
    "array",
    [
        da.arange(100, chunks=25),
        da.ones((10, 10), chunks=25),
    ],
)
def test_array_picklable(array):
    """Dask arrays round-trip through pickle."""
    from pickle import dumps, loads

    a2 = loads(dumps(array))
    assert_eq(array, a2)
def test_from_array_raises_on_bad_chunks():
    """Chunk specs inconsistent with the array shape raise ValueError."""
    x = np.ones(10)

    with pytest.raises(ValueError):
        da.from_array(x, chunks=(5, 5, 5))

    # with pytest.raises(ValueError):
    #      da.from_array(x, chunks=100)

    with pytest.raises(ValueError):
        da.from_array(x, chunks=((5, 5, 5),))
def test_concatenate_axes():
    """``concatenate_axes`` concatenates nested block lists along the given
    axes; nesting depth must match the number of axes exactly."""
    x = np.ones((2, 2, 2))

    assert_eq(concatenate_axes([x, x], axes=[0]), np.ones((4, 2, 2)))
    assert_eq(concatenate_axes([x, x, x], axes=[0]), np.ones((6, 2, 2)))
    assert_eq(concatenate_axes([x, x], axes=[1]), np.ones((2, 4, 2)))
    assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]), np.ones((4, 4, 2)))
    assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]), np.ones((4, 2, 4)))
    assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]), np.ones((2, 4, 6)))

    with pytest.raises(ValueError):
        concatenate_axes(
            [[x, x], [x, x]], axes=[0]
        )  # not all nested lists accounted for
    with pytest.raises(ValueError):
        concatenate_axes([x, x], axes=[0, 1, 2, 3])  # too many axes
def test_blockwise_concatenate():
    """``blockwise(..., concatenate=True)`` hands each function call
    concatenated ndarrays along the contracted dimensions."""
    x = da.ones((4, 4, 4), chunks=(2, 2, 2))
    y = da.ones((4, 4), chunks=(2, 2))

    def f(a, b):
        # Contracted axes j, k arrive fully concatenated.
        assert isinstance(a, np.ndarray)
        assert isinstance(b, np.ndarray)

        assert a.shape == (2, 4, 4)
        assert b.shape == (4, 4)

        return (a + b).sum(axis=(1, 2))

    z = da.blockwise(f, "i", x, "ijk", y, "jk", concatenate=True, dtype=x.dtype)
    assert_eq(z, np.ones(4) * 32)

    z = da.blockwise(add, "ij", y, "ij", y, "ij", concatenate=True, dtype=x.dtype)
    assert_eq(z, np.ones((4, 4)) * 2)

    def f(a, b, c):
        assert isinstance(a, np.ndarray)
        assert isinstance(b, np.ndarray)
        assert isinstance(c, np.ndarray)

        assert a.shape == (4, 2, 4)
        assert b.shape == (4, 4)
        assert c.shape == (4, 2)

        return np.ones(2)

    z = da.blockwise(
        f, "j", x, "ijk", y, "ki", y, "ij", concatenate=True, dtype=x.dtype
    )
    assert_eq(z, np.ones(4), check_shape=False)
def test_common_blockdim():
    """``common_blockdim`` finds the finest chunking compatible with all
    inputs along one axis (the union of chunk boundaries)."""
    assert common_blockdim([(5,), (5,)]) == (5,)
    assert common_blockdim([(5,), (2, 3)]) == (2, 3)
    assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
    assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
    assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)

    assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)
    assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)
def test_uneven_chunks_that_fit_neatly():
    """When one chunking's boundaries are a subset of the other's, the
    finer chunking is used for the result."""
    x = da.arange(10, chunks=((5, 5),))
    y = da.ones(10, chunks=((5, 2, 3),))

    assert_eq(x + y, np.arange(10) + np.ones(10))

    z = x + y
    assert z.chunks == ((5, 2, 3),)
def test_elemwise_uneven_chunks():
    """Elementwise ops on misaligned chunkings rechunk to the union of
    boundaries along each axis."""
    x = da.arange(10, chunks=((4, 6),))
    y = da.ones(10, chunks=((6, 4),))

    assert_eq(x + y, np.arange(10) + np.ones(10))

    z = x + y
    assert z.chunks == ((4, 2, 4),)

    x = da.random.random((10, 10), chunks=((4, 6), (5, 2, 3)))
    y = da.random.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))

    z = x + y
    assert_eq(x + y, x.compute() + y.compute())
    assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))
def test_uneven_chunks_blockwise():
    """blockwise with concatenation keeps each operand's own output-axis
    chunking even when the contracted axis is misaligned."""
    x = da.random.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))
    y = da.random.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))
    z = da.blockwise(np.dot, "ik", x, "ij", y, "jk", dtype=x.dtype, concatenate=True)
    assert z.chunks == (x.chunks[0], y.chunks[1])
    assert_eq(z, x.compute().dot(y))
def test_warn_bad_rechunking():
    """Combining pathologically misaligned chunkings emits a
    PerformanceWarning mentioning the blow-up factor."""
    x = da.ones((20, 20), chunks=(20, 1))
    y = da.ones((20, 20), chunks=(1, 20))

    with pytest.warns(da.core.PerformanceWarning, match="factor of 20"):
        x + y
def test_concatenate_stack_dont_warn():
    """concatenate/stack of many small arrays must not emit warnings."""
    with warnings.catch_warnings(record=True) as record:
        da.concatenate([da.ones(2, chunks=1)] * 62)
    assert not record

    with warnings.catch_warnings(record=True) as record:
        da.stack([da.ones(2, chunks=1)] * 62)
    assert not record
def test_map_blocks_delayed():
    """A Delayed argument to map_blocks is incorporated into the graph and
    produces the same result as the concrete value."""
    x = da.ones((10, 10), chunks=(5, 5))
    y = np.ones((5, 5))

    z = x.map_blocks(add, y, dtype=x.dtype)

    yy = delayed(y)
    zz = x.map_blocks(add, yy, dtype=x.dtype)

    assert_eq(z, zz)

    assert yy.key in zz.dask
def test_no_chunks():
    """Elementwise ops and reductions work on arrays with unknown (NaN)
    chunk sizes."""
    X = np.arange(11)
    dsk = {("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)}
    x = Array(dsk, "x", ((np.nan, np.nan),), np.arange(1).dtype)
    assert_eq(x + 1, X + 1)
    assert_eq(x.sum(), X.sum())
    assert_eq((x + 1).std(), (X + 1).std())
    assert_eq((x + x).std(), (X + X).std())
    assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))
def test_no_chunks_2d():
    """Elementwise ops, transpose, reductions and dot work on a 2-d array
    whose chunk sizes are all unknown (NaN)."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))

    # ``pytest.warns(None)`` is deprecated in pytest 7 and an error in
    # pytest 8; silence the expected zero-division warning from np.log
    # with warnings.catch_warnings instead.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # zero division warning
        assert_eq(da.log(x), np.log(X))
    assert_eq(x.T, X.T)
    assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))
    assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))
    assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))
def test_no_chunks_yes_chunks():
    """Known chunks propagate through ops while unknown (NaN) chunks stay
    unknown; a contraction (dot) restores fully known chunks."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    x._chunks = ((2, 2), (np.nan, np.nan, np.nan))
    assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))
    assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))
    assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))
def test_raise_informative_errors_no_chunks():
    """Operations that require known chunk sizes raise ValueErrors whose
    messages mention 'chunk' and 'unknown'."""
    X = np.arange(10)
    a = da.from_array(X, chunks=(5, 5))
    a._chunks = ((np.nan, np.nan),)

    b = da.from_array(X, chunks=(4, 4, 2))
    b._chunks = ((np.nan, np.nan, np.nan),)

    for op in [
        lambda: a + b,
        lambda: a[1],
        lambda: a[::2],
        lambda: a[-5],
        lambda: a.rechunk(3),
        lambda: a.reshape(2, 5),
    ]:
        with pytest.raises(ValueError) as e:
            op()
        # If the message isn't informative, re-run uncaught so pytest
        # displays the actual (unhelpful) error.
        if "chunk" not in str(e.value) or "unknown" not in str(e.value):
            op()
def test_no_chunks_slicing_2d():
    """Slicing along an axis with unknown chunks raises; slicing along a
    fully-known axis still works."""
    X = np.arange(24).reshape((4, 6))
    x = da.from_array(X, chunks=(2, 2))
    x._chunks = ((2, 2), (np.nan, np.nan, np.nan))

    assert_eq(x[0], X[0])

    for op in [lambda: x[:, 4], lambda: x[:, ::2], lambda: x[0, 2:4]]:
        with pytest.raises(ValueError, match="chunk sizes are unknown"):
            op()
def test_index_array_with_array_1d():
    """Boolean-mask indexing works with unknown chunk sizes; masks of
    mismatched length raise ValueError."""
    x = np.arange(10)
    dx = da.from_array(x, chunks=(5,))
    dx._chunks = ((np.nan, np.nan),)

    assert_eq(x[x > 6], dx[dx > 6])
    assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])

    dy = da.ones(11, chunks=(3,))

    with pytest.raises(ValueError):
        dx[dy > 5]
def test_index_array_with_array_2d():
    """2-d boolean-mask indexing flattens like numpy; with unknown chunks
    element order may differ and a warning is emitted per indexing op."""
    x = np.arange(24).reshape((4, 6))
    dx = da.from_array(x, chunks=(2, 2))

    assert_eq(x[x > 6], dx[dx > 6])
    assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])

    # Test with unknown chunks
    dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))

    with pytest.warns(UserWarning, match="different ordering") as record:
        assert sorted(x[x % 2 == 0].tolist()) == sorted(
            dx[dx % 2 == 0].compute().tolist()
        )
        assert sorted(x[x > 6].tolist()) == sorted(dx[dx > 6].compute().tolist())

    assert len(record) == 2
@pytest.mark.xfail(reason="Chunking does not align well")
def test_index_array_with_array_3d_2d():
    """Indexing a 3-d array with a 2-d boolean mask (currently xfail)."""
    x = np.arange(4 ** 3).reshape((4, 4, 4))
    dx = da.from_array(x, chunks=(2, 2, 2))

    # NOTE(review): this random mask is immediately overwritten by the
    # deterministic one below — presumably leftover; confirm before removal.
    ind = np.random.random((4, 4)) > 0.5
    ind = np.arange(4 ** 2).reshape((4, 4)) % 2 == 0
    dind = da.from_array(ind, (2, 2))

    assert_eq(x[ind], dx[dind])
    assert_eq(x[:, ind], dx[:, dind])
def test_setitem_1d():
    """Boolean-mask setitem on a 1-d array matches numpy (sequential masks
    see earlier mutations)."""
    x = np.arange(10)
    dx = da.from_array(x.copy(), chunks=(5,))

    x[x > 6] = -1
    x[x % 2 == 0] = -2

    dx[dx > 6] = -1
    dx[dx % 2 == 0] = -2

    assert_eq(x, dx)
def test_setitem_2d():
    """Boolean-mask setitem on a 2-d array matches numpy."""
    x = np.arange(24).reshape((4, 6))
    dx = da.from_array(x.copy(), chunks=(2, 2))

    x[x > 6] = -1
    x[x % 2 == 0] = -2

    dx[dx > 6] = -1
    dx[dx % 2 == 0] = -2

    assert_eq(x, dx)
def test_setitem_extended_API_0d():
    # 0-d array: assignment via empty tuple and Ellipsis.
    x = np.array(9)
    dx = da.from_array(9)

    x[()] = -1
    dx[()] = -1
    assert_eq(x, dx.compute())

    x[...] = -11
    dx[...] = -11
    assert_eq(x, dx.compute())
def test_setitem_extended_API_1d():
    """Strided-slice and ellipsis assignment on a 1-d array matches NumPy."""
    # 1-d array
    x = np.arange(10)
    dx = da.from_array(x.copy(), chunks=(4, 6))  # deliberately uneven chunks
    x[2:8:2] = -1
    dx[2:8:2] = -1
    assert_eq(x, dx.compute())
    x[...] = -11
    dx[...] = -11
    assert_eq(x, dx.compute())
@pytest.mark.parametrize(
    "index, value",
    [
        # (index expression, value to assign) — covers ellipsis, strided and
        # negative-step slices, integer/fancy/boolean indices, broadcastable
        # and exact-shape RHS values, and a dask-array boolean index.
        [Ellipsis, -1],
        [(slice(None, None, 2), slice(None, None, -1)), -1],
        [slice(1, None, 2), -1],
        [[4, 3, 1], -1],
        [(Ellipsis, 4), -1],
        [5, -1],
        [(slice(None), 2), range(6)],
        [3, range(10)],
        [(slice(None), [3, 5, 6]), [-30, -31, -32]],
        [([-1, 0, 1], 2), [-30, -31, -32]],
        [(slice(None, 2), slice(None, 3)), [-50, -51, -52]],
        [(slice(None), [6, 1, 3]), [-60, -61, -62]],
        [(slice(1, 3), slice(1, 4)), [[-70, -71, -72]]],
        [(slice(None), [9, 8, 8]), [-80, -81, 91]],
        [([True, False, False, False, True, False], 2), -1],
        [(3, [True, True, False, True, True, False, True, False, True, True]), -1],
        [(np.array([False, False, True, True, False, False]), slice(5, 7)), -1],
        [
            (
                4,
                da.from_array(
                    [False, False, True, True, False, False, True, False, False, True]
                ),
            ),
            -1,
        ],
    ],
)
def test_setitem_extended_API_2d(index, value):
    """Each parametrized (index, value) assignment matches NumPy semantics."""
    # 2-d array
    x = np.ma.arange(60).reshape((6, 10))
    dx = da.from_array(x, chunks=(2, 3))
    dx[index] = value
    x[index] = value
    assert_eq(x, dx.compute())
def test_setitem_extended_API_2d_rhs_func_of_lhs():
    """Assignment where the RHS and/or index is derived from the LHS itself.

    Each stanza rebuilds ``dx`` from the (mutated) ``x``, applies the same
    assignment to both, and checks they agree.  The stanzas are
    order-dependent: each one sees the cumulative mutations of ``x``.
    """
    # Cases:
    # * RHS and/or indices are a function of the LHS
    # * Indices have unknown chunk sizes
    # * RHS has extra leading size 1 dimensions compared to LHS
    x = np.arange(60).reshape((6, 10))
    chunks = (2, 3)
    dx = da.from_array(x, chunks=chunks)
    dx[2:4, dx[0] > 3] = -5
    x[2:4, x[0] > 3] = -5
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[2, dx[0] < -2] = -7
    x[2, x[0] < -2] = -7
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())
    # NOTE(review): the stanza below duplicates the one above verbatim —
    # likely a copy-paste leftover; harmless but redundant.
    dx = da.from_array(x, chunks=chunks)
    dx[dx % 2 == 0] = -8
    x[x % 2 == 0] = -8
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]
    x[3:5, 5:1:-2] = -x[:2, 4:1:-2]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[0, 1:3] = -dx[0, 4:2:-1]
    x[0, 1:3] = -x[0, 4:2:-1]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx
    x[...] = x
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[...] = dx[...]
    x[...] = x[...]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[0] = dx[-1]
    x[0] = x[-1]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[0, :] = dx[-2, :]
    x[0, :] = x[-2, :]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[:, 1] = dx[:, -3]
    x[:, 1] = x[:, -3]
    assert_eq(x, dx.compute())
    # Fancy indexing with a dask-array index.
    index = da.from_array([0, 2], chunks=(2,))
    dx = da.from_array(x, chunks=chunks)
    dx[index, 8] = [99, 88]
    x[[0, 2], 8] = [99, 88]
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=chunks)
    dx[:, index] = dx[:, :2]
    x[:, [0, 2]] = x[:, :2]
    assert_eq(x, dx.compute())
    # Index arrays with unknown chunk sizes (result of da.where).
    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-23, -33]
    x[index.compute(), 7] = [-23, -33]
    assert_eq(x, dx.compute())
    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    dx = da.from_array(x, chunks=chunks)
    dx[(index,)] = -34
    x[(index.compute(),)] = -34
    assert_eq(x, dx.compute())
    index = index - 4  # negative indices
    dx = da.from_array(x, chunks=chunks)
    dx[index, 7] = [-43, -53]
    x[index.compute(), 7] = [-43, -53]
    assert_eq(x, dx.compute())
    # NOTE(review): this stanza intentionally reuses the previous ``dx``
    # (no re-initialization) — x and dx are already equal at this point.
    index = da.from_array([0, -1], chunks=(1,))
    x[[0, -1]] = 9999
    dx[(index,)] = 9999
    assert_eq(x, dx.compute())
    dx = da.from_array(x, chunks=(-1, -1))
    dx[...] = da.from_array(x, chunks=chunks)
    assert_eq(x, dx.compute())
    # RHS has extra leading size 1 dimensions compared to LHS
    dx = da.from_array(x.copy(), chunks=(2, 3))
    v = x.reshape((1, 1) + x.shape)
    x[...] = v
    dx[...] = v
    assert_eq(x, dx.compute())
    index = da.where(da.arange(3, chunks=(1,)) < 2)[0]
    v = -np.arange(12).reshape(1, 1, 6, 2)
    x[:, [0, 1]] = v
    dx[:, index] = v
    assert_eq(x, dx.compute())
@pytest.mark.parametrize(
    "index, value",
    [
        # Assigning np.ma.masked / masked arrays must propagate the mask.
        [(1, slice(1, 7, 2)), np.ma.masked],
        [(slice(1, 5, 2), [7, 5]), np.ma.masked_all((2, 2))],
    ],
)
def test_setitem_extended_API_2d_mask(index, value):
    """Masked assignment into a plain-data dask array matches np.ma."""
    x = np.ma.arange(60).reshape((6, 10))
    dx = da.from_array(x.data, chunks=(2, 3))  # start from unmasked data
    dx[index] = value
    x[index] = value
    dx = dx.persist()  # also exercise the persisted graph
    assert_eq(x, dx.compute())
    assert_eq(x.mask, da.ma.getmaskarray(dx).compute())
def test_setitem_on_read_only_blocks():
    """Assignment must succeed even when blocks are read-only views."""
    # Outputs of broadcast_trick-style functions contain read-only
    # arrays
    dx = da.empty((4, 6), dtype=float, chunks=(2, 2))
    dx[0] = 99
    assert_eq(dx[0, 0], 99.0)
    dx[0:2] = 88
    assert_eq(dx[0, 0], 88.0)
def test_setitem_errs():
    """Invalid setitem calls raise the documented exception types."""
    x = da.ones((4, 4), chunks=(2, 2))
    with pytest.raises(ValueError):
        x[x > 1] = x  # RHS must be broadcastable to the masked selection
    # Shape mismatch
    with pytest.raises(ValueError):
        x[[True, True, False, False], 0] = [2, 3, 4]
    with pytest.raises(ValueError):
        x[[True, True, True, False], 0] = [2, 3]
    x = da.ones((4, 4), chunks=(2, 2))
    with pytest.raises(ValueError):
        x[0, da.from_array([True, False, False, True])] = [2, 3, 4]
    x = da.ones((4, 4), chunks=(2, 2))
    with pytest.raises(ValueError):
        x[0, da.from_array([True, True, False, False])] = [2, 3, 4]
    x = da.ones((4, 4), chunks=(2, 2))
    with pytest.raises(ValueError):
        x[da.from_array([True, True, True, False]), 0] = [2, 3]
    x = da.ones((4, 4), chunks=(2, 2))
    # Too many indices
    with pytest.raises(IndexError):
        x[:, :, :] = 2
    # 2-d boolean indexing a single dimension
    with pytest.raises(IndexError):
        x[[[True, True, False, False]], 0] = 5
    # Too many/not enough booleans
    with pytest.raises(IndexError):
        x[[True, True, False]] = 5
    with pytest.raises(IndexError):
        x[[False, True, True, True, False]] = 5
    # 2-d indexing a single dimension
    with pytest.raises(IndexError):
        x[[[1, 2, 3]], 0] = 5
    # Multiple 1-d boolean/integer arrays
    with pytest.raises(NotImplementedError):
        x[[1, 2], [2, 3]] = 6
    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [2, 3]] = 5
    with pytest.raises(NotImplementedError):
        x[[True, True, False, False], [False, True, False, False]] = 7
    # scalar boolean indexing
    with pytest.raises(NotImplementedError):
        x[True] = 5
    with pytest.raises(NotImplementedError):
        x[np.array(True)] = 5
    with pytest.raises(NotImplementedError):
        x[0, da.from_array(True)] = 5
    # Scalar arrays
    y = da.from_array(np.array(1))
    with pytest.raises(IndexError):
        y[:] = 2
    # RHS has non-broadcastable extra leading dimensions
    x = np.arange(12).reshape((3, 4))
    dx = da.from_array(x, chunks=(2, 2))
    with pytest.raises(ValueError):
        dx[...] = np.arange(24).reshape((2, 1, 3, 4))
    # RHS doesn't have chunks set
    dx = da.unique(da.random.random([10]))
    with pytest.raises(ValueError, match="Arrays chunk sizes are unknown"):
        dx[0] = 0
def test_zero_slice_dtypes():
    """Empty fancy indexing preserves dtype and yields a length-0 array."""
    x = da.arange(5, chunks=1)
    y = x[[]]
    assert y.dtype == x.dtype
    assert y.shape == (0,)
    assert_eq(x[[]], np.arange(5)[[]])
def test_zero_sized_array_rechunk():
x = da.arange(5, chunks=1)[:0]
y = da.blockwise(identity, "i", x, "i", dtype=x.dtype)
assert_eq(x, y)
def test_blockwise_zero_shape():
da.blockwise(
lambda x: x,
"i",
da.arange(10, chunks=10),
"i",
da.from_array(np.ones((0, 2)), ((0,), 2)),
"ab",
da.from_array(np.ones((0,)), ((0,),)),
"a",
dtype="float64",
)
def test_blockwise_zero_shape_new_axes():
da.blockwise(
lambda x: np.ones(42),
"i",
da.from_array(np.ones((0, 2)), ((0,), 2)),
"ab",
da.from_array(np.ones((0,)), ((0,),)),
"a",
dtype="float64",
new_axes={"i": 42},
)
def test_broadcast_against_zero_shape():
assert_eq(da.arange(1, chunks=1)[:0] + 0, np.arange(1)[:0] + 0)
assert_eq(da.arange(1, chunks=1)[:0] + 0.1, np.arange(1)[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0, np.ones((5, 5))[:0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1, np.ones((5, 5))[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0, np.ones((5, 5))[:, :0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1, np.ones((5, 5))[:, :0] + 0.1)
def test_from_array_name():
x = np.array([1, 2, 3, 4, 5])
chunks = x.shape
# Default is tokenize the array
dx = da.from_array(x, chunks=chunks)
hashed_name = dx.name
assert da.from_array(x, chunks=chunks).name == hashed_name
# Specify name directly
assert da.from_array(x, chunks=chunks, name="x").name == "x"
# False gives a random name
dx2 = da.from_array(x, chunks=chunks, name=False)
dx3 = da.from_array(x, chunks=chunks, name=False)
assert dx2.name != hashed_name
assert dx3.name != hashed_name
assert dx2.name != dx3.name
def test_concatenate_errs():
with pytest.raises(ValueError, match=r"Shapes.*\(2, 1\)"):
da.concatenate(
[da.zeros((2, 1), chunks=(2, 1)), da.zeros((2, 3), chunks=(2, 3))]
)
with pytest.raises(ValueError):
da.concatenate(
[da.zeros((1, 2), chunks=(1, 2)), da.zeros((3, 2), chunks=(3, 2))], axis=1
)
def test_stack_errs():
with pytest.raises(ValueError) as e:
da.stack([da.zeros((2,), chunks=2)] * 10 + [da.zeros((3,), chunks=3)] * 10)
assert (
str(e.value)
== "Stacked arrays must have the same shape. The first array had shape (2,), while array 11 has shape (3,)."
)
assert len(str(e.value)) < 105
def test_blockwise_with_numpy_arrays():
x = np.ones(10)
y = da.ones(10, chunks=(5,))
assert_eq(x + y, x + x)
s = da.sum(x)
assert any(x is v for v in s.dask.values())
@pytest.mark.parametrize("chunks", (100, 6))
@pytest.mark.parametrize("other", [[0, 0, 1], [2, 1, 3], (0, 0, 1)])
def test_elemwise_with_lists(chunks, other):
x = np.arange(12).reshape((4, 3))
d = da.arange(12, chunks=chunks).reshape((4, 3))
x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T
d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T
assert_eq(x2, d2)
x3 = x2 * other
d3 = d2 * other
assert_eq(x3, d3)
def test_constructor_plugin():
L = []
L2 = []
with dask.config.set(array_plugins=[L.append, L2.append]):
x = da.ones(10, chunks=5)
y = x + 1
assert L == L2 == [x, y]
with dask.config.set(array_plugins=[lambda x: x.compute()]):
x = da.ones(10, chunks=5)
y = x + 1
assert isinstance(y, np.ndarray)
assert len(L) == 2
def test_no_warnings_on_metadata():
x = da.ones(5, chunks=3)
with warnings.catch_warnings(record=True) as record:
da.arccos(x)
assert not record
def test_delayed_array_key_hygeine():
    """Round-tripping an array through ``delayed`` keeps graph keys consistent.

    NOTE(review): the name misspells "hygiene"; kept unchanged so the
    test id stays stable for CI history / deselection.
    """
    a = da.zeros((1,), chunks=(1,))
    d = delayed(identity)(a)
    b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)
    assert_eq(a, b)
def test_empty_chunks_in_array_len():
x = da.ones((), chunks=())
with pytest.raises(TypeError) as exc_info:
len(x)
err_msg = "len() of unsized object"
assert err_msg in str(exc_info.value)
@pytest.mark.parametrize("dtype", [None, [("a", "f4"), ("b", object)]])
def test_meta(dtype):
a = da.zeros((1,), chunks=(1,))
assert a._meta.dtype == a.dtype
assert isinstance(a._meta, np.ndarray)
assert a.nbytes < 1000
@pytest.mark.parametrize(
"shape,limit,expected",
[
(100, 10, (10,) * 10),
(20, 10, (10, 10)),
(20, 5, (5, 5, 5, 5)),
(24, 5, (4, 4, 4, 4, 4, 4)), # common factor is close, use it
(23, 5, (5, 5, 5, 5, 3)), # relatively prime, don't use 1s
(1000, 167, (125,) * 8), # find close value
],
)
def test_normalize_chunks_auto_1d(shape, limit, expected):
result = normalize_chunks("auto", (shape,), limit=limit, dtype=np.uint8)
assert result == (expected,)
@pytest.mark.parametrize(
"shape,chunks,limit,expected",
[
((20, 20), ("auto", 2), 20, ((10, 10), (2,) * 10)),
(
(20, 20),
("auto", (2, 2, 2, 2, 2, 5, 5)),
20,
((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5)),
),
((1, 20), "auto", 10, ((1,), (10, 10))),
],
)
def test_normalize_chunks_auto_2d(shape, chunks, limit, expected):
result = normalize_chunks(chunks, shape, limit=limit, dtype="uint8")
assert result == expected
def test_normalize_chunks_auto_3d():
result = normalize_chunks(
("auto", "auto", 2), (20, 20, 20), limit=200, dtype="uint8"
)
expected = ((10, 10), (10, 10), (2,) * 10)
assert result == expected
result = normalize_chunks("auto", (20, 20, 20), limit=8, dtype="uint8")
expected = ((2,) * 10,) * 3
assert result == expected
def test_constructors_chunks_dict():
x = da.ones((20, 20), chunks={0: 10, 1: 5})
assert x.chunks == ((10, 10), (5, 5, 5, 5))
x = da.ones((20, 20), chunks={0: 10, 1: "auto"})
assert x.chunks == ((10, 10), (20,))
def test_from_array_chunks_dict():
with dask.config.set({"array.chunk-size": "128kiB"}):
x = np.empty((100, 100, 100))
y = da.from_array(x, chunks={0: 10, 1: -1, 2: "auto"})
z = da.from_array(x, chunks=(10, 100, 10))
assert y.chunks == z.chunks
@pytest.mark.parametrize("dtype", [object, [("a", object), ("b", int)]])
def test_normalize_chunks_object_dtype(dtype):
x = np.array(["a", "abc"], dtype=object)
with pytest.raises(NotImplementedError):
da.from_array(x, chunks="auto")
def test_normalize_chunks_tuples_of_tuples():
result = normalize_chunks(((2, 3, 5), "auto"), (10, 10), limit=10, dtype=np.uint8)
expected = ((2, 3, 5), (2, 2, 2, 2, 2))
assert result == expected
def test_normalize_chunks_nan():
with pytest.raises(ValueError) as info:
normalize_chunks("auto", (np.nan,), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
with pytest.raises(ValueError) as info:
normalize_chunks(((np.nan, np.nan), "auto"), (10, 10), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
def test_pandas_from_dask_array():
pd = pytest.importorskip("pandas")
from dask.dataframe._compat import PANDAS_GT_130, PANDAS_GT_131
a = da.ones((12,), chunks=4)
s = pd.Series(a, index=range(12))
if PANDAS_GT_130 and not PANDAS_GT_131:
# https://github.com/pandas-dev/pandas/issues/38645
assert s.dtype != a.dtype
else:
assert s.dtype == a.dtype
assert_eq(s.values, a)
def test_from_zarr_unique_name():
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
b = zarr.array([4, 5, 6])
assert da.from_zarr(a).name != da.from_zarr(b).name
def test_from_zarr_name():
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
assert da.from_zarr(a, name="foo").name == "foo"
def test_zarr_roundtrip():
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize("compute", [False, True])
def test_zarr_return_stored(compute):
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a2 = a.to_zarr(d, compute=compute, return_stored=True)
assert isinstance(a2, Array)
assert_eq(a, a2, check_graph=False)
assert a2.chunks == a.chunks
def test_zarr_inline_array():
zarr = pytest.importorskip("zarr")
a = zarr.array([1, 2, 3])
dsk = dict(da.from_zarr(a, inline_array=True).dask)
assert len(dsk) == 1
assert a in list(dsk.values())[0]
def test_zarr_existing_array():
zarr = pytest.importorskip("zarr")
c = (1, 1)
a = da.ones((3, 3), chunks=c)
z = zarr.zeros_like(a, chunks=c)
a.to_zarr(z)
a2 = da.from_zarr(z)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_to_zarr_unknown_chunks_raises():
pytest.importorskip("zarr")
a = da.random.random((10,), chunks=(3,))
a = a[a > 0.5]
with pytest.raises(ValueError, match="unknown chunk sizes"):
a.to_zarr({})
def test_read_zarr_chunks():
pytest.importorskip("zarr")
a = da.zeros((9,), chunks=(3,))
with tmpdir() as d:
a.to_zarr(d)
arr = da.from_zarr(d, chunks=(5,))
assert arr.chunks == ((5, 4),)
def test_zarr_pass_mapper():
pytest.importorskip("zarr")
import zarr.storage
with tmpdir() as d:
mapper = zarr.storage.DirectoryStore(d)
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(mapper)
a2 = da.from_zarr(mapper)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_group():
zarr = pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d, component="test")
with pytest.raises((OSError, ValueError)):
a.to_zarr(d, component="test", overwrite=False)
a.to_zarr(d, component="test", overwrite=True)
# second time is fine, group exists
a.to_zarr(d, component="test2", overwrite=False)
a.to_zarr(d, component="nested/test", overwrite=False)
group = zarr.open_group(d, mode="r")
assert list(group) == ["nested", "test", "test2"]
assert "test" in group["nested"]
a2 = da.from_zarr(d, component="test")
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize(
"data",
[
[(), True],
[((1,),), True],
[((1, 1, 1),), True],
[((1,), (1,)), True],
[((2, 2, 1),), True],
[((2, 2, 3),), False],
[((1, 1, 1), (2, 2, 3)), False],
[((1, 2, 1),), False],
],
)
def test_regular_chunks(data):
chunkset, expected = data
assert da.core._check_regular_chunks(chunkset) == expected
def test_zarr_nocompute():
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
out = a.to_zarr(d, compute=False)
assert isinstance(out, Delayed)
dask.compute(out)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_tiledb_roundtrip():
tiledb = pytest.importorskip("tiledb")
# 1) load with default chunking
# 2) load from existing tiledb.DenseArray
# 3) write to existing tiledb.DenseArray
a = da.random.random((3, 3))
with tmpdir() as uri:
da.to_tiledb(a, uri)
tdb = da.from_tiledb(uri)
assert_eq(a, tdb)
assert a.chunks == tdb.chunks
# from tiledb.array
with tiledb.open(uri) as t:
tdb2 = da.from_tiledb(t)
assert_eq(a, tdb2)
with tmpdir() as uri2:
with tiledb.empty_like(uri2, a) as t:
a.to_tiledb(t)
assert_eq(da.from_tiledb(uri2), a)
# specific chunking
with tmpdir() as uri:
a = da.random.random((3, 3), chunks=(1, 1))
a.to_tiledb(uri)
tdb = da.from_tiledb(uri)
assert_eq(a, tdb)
assert a.chunks == tdb.chunks
def test_tiledb_multiattr():
tiledb = pytest.importorskip("tiledb")
dom = tiledb.Domain(
tiledb.Dim("x", (0, 1000), tile=100), tiledb.Dim("y", (0, 1000), tile=100)
)
schema = tiledb.ArraySchema(
attrs=(tiledb.Attr("attr1"), tiledb.Attr("attr2")), domain=dom
)
with tmpdir() as uri:
tiledb.DenseArray.create(uri, schema)
tdb = tiledb.DenseArray(uri, "w")
ar1 = np.random.randn(*tdb.schema.shape)
ar2 = np.random.randn(*tdb.schema.shape)
tdb[:] = {"attr1": ar1, "attr2": ar2}
tdb = tiledb.DenseArray(uri, "r")
# basic round-trip from dask.array
d = da.from_tiledb(uri, attribute="attr2")
assert_eq(d, ar2)
# smoke-test computation directly on the TileDB view
d = da.from_tiledb(uri, attribute="attr2")
assert_eq(np.mean(ar2), d.mean().compute(scheduler="threads"))
def test_blocks_indexer():
x = da.arange(10, chunks=2)
assert isinstance(x.blocks[0], da.Array)
assert_eq(x.blocks[0], x[:2])
assert_eq(x.blocks[-1], x[-2:])
assert_eq(x.blocks[:3], x[:6])
assert_eq(x.blocks[[0, 1, 2]], x[:6])
assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
x = da.random.random((20, 20), chunks=(4, 5))
assert_eq(x.blocks[0], x[:4])
assert_eq(x.blocks[0, :3], x[:4, :15])
assert_eq(x.blocks[:, :3], x[:, :15])
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
x.blocks[[0, 1], [0, 1]]
with pytest.raises(ValueError):
x.blocks[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
x.blocks[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
x.blocks[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
x.blocks[100, 100]
def test_partitions_indexer():
# .partitions is an alias of .blocks for dask arrays
x = da.arange(10, chunks=2)
assert isinstance(x.partitions[0], da.Array)
assert_eq(x.partitions[0], x[:2])
assert_eq(x.partitions[-1], x[-2:])
assert_eq(x.partitions[:3], x[:6])
assert_eq(x.partitions[[0, 1, 2]], x[:6])
assert_eq(x.partitions[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
x = da.random.random((20, 20), chunks=(4, 5))
assert_eq(x.partitions[0], x[:4])
assert_eq(x.partitions[0, :3], x[:4, :15])
assert_eq(x.partitions[:, :3], x[:, :15])
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
assert_eq(x.partitions[0, :, 0], np.ones((10, 40, 10)))
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
x.partitions[[0, 1], [0, 1]]
with pytest.raises(ValueError):
x.partitions[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
x.partitions[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
x.partitions[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
x.partitions[100, 100]
@pytest.mark.filterwarnings("ignore:the matrix subclass:PendingDeprecationWarning")
def test_dask_array_holds_scipy_sparse_containers():
pytest.importorskip("scipy.sparse")
import scipy.sparse
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
xx = x.compute()
y = x.map_blocks(scipy.sparse.csr_matrix)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler="single-threaded")
assert all(isinstance(v, scipy.sparse.csr_matrix) for v in values)
yy = y.compute(scheduler="single-threaded")
assert isinstance(yy, scipy.sparse.spmatrix)
assert (yy == xx).all()
z = x.T.map_blocks(scipy.sparse.csr_matrix)
zz = z.compute(scheduler="single-threaded")
assert isinstance(zz, scipy.sparse.spmatrix)
assert (zz == xx.T).all()
@pytest.mark.parametrize("axis", [0, 1])
def test_scipy_sparse_concatenate(axis):
pytest.importorskip("scipy.sparse")
import scipy.sparse
rs = da.random.RandomState(RandomState=np.random.RandomState)
xs = []
ys = []
for i in range(2):
x = rs.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
xs.append(x)
ys.append(x.map_blocks(scipy.sparse.csr_matrix))
z = da.concatenate(ys, axis=axis)
z = z.compute()
if axis == 0:
sp_concatenate = scipy.sparse.vstack
elif axis == 1:
sp_concatenate = scipy.sparse.hstack
z_expected = sp_concatenate([scipy.sparse.csr_matrix(e.compute()) for e in xs])
assert (z != z_expected).nnz == 0
def test_3851():
    """Regression test for dask#3851: ``da.argmax`` must emit no warnings.

    Bug fix: the original used ``warnings.catch_warnings()`` *without*
    ``record=True``, which binds ``record`` to ``None`` — so
    ``assert not record`` passed vacuously and never checked anything.
    """
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")  # capture even once-only warnings
        Y = da.random.random((10, 10), chunks="auto")
        da.argmax(Y, axis=0).compute()
        assert not record
def test_3925():
x = da.from_array(np.array(["a", "b", "c"], dtype=object), chunks=-1)
assert (x[0] == x[0]).compute(scheduler="sync")
def test_map_blocks_large_inputs_delayed():
    """Large ndarray arguments to ``map_blocks`` appear exactly once in the graph."""
    a = da.ones(10, chunks=(5,))
    b = np.ones(1000000)
    c = a.map_blocks(add, b)
    assert any(b is v for v in c.dask.values())
    assert repr(dict(c.dask)).count(repr(b)[:10]) == 1  # only one occurrence
    d = a.map_blocks(lambda x, y: x + y.sum(), y=b)
    assert_eq(d, d)
    assert any(b is v for v in d.dask.values())
    # Bug fix: the original re-checked ``c.dask`` here (copy-paste); the
    # keyword-argument case must be verified on ``d.dask``.
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
def test_blockwise_large_inputs_delayed():
    """Large ndarray arguments to ``blockwise`` appear exactly once in the graph."""
    a = da.ones(10, chunks=(5,))
    b = np.ones(1000000)
    c = da.blockwise(add, "i", a, "i", b, None, dtype=a.dtype)
    assert any(b is v for v in c.dask.values())
    assert repr(dict(c.dask)).count(repr(b)[:10]) == 1  # only one occurrence
    d = da.blockwise(lambda x, y: x + y, "i", a, "i", y=b, dtype=a.dtype)
    assert any(b is v for v in d.dask.values())
    # Bug fix: the original re-checked ``c.dask`` here (copy-paste); the
    # keyword-argument case must be verified on ``d.dask``.
    assert repr(dict(d.dask)).count(repr(b)[:10]) == 1  # only one occurrence
def test_slice_reversed():
x = da.ones(10, chunks=-1)
y = x[6:3]
assert_eq(y, np.ones(0))
def test_map_blocks_chunks():
x = da.arange(400, chunks=(100,))
y = da.arange(40, chunks=(10,))
def func(a, b):
return np.array([a.max(), b.max()])
assert_eq(
da.map_blocks(func, x, y, chunks=(2,), dtype=x.dtype),
np.array([99, 9, 199, 19, 299, 29, 399, 39]),
)
def test_nbytes_auto():
chunks = normalize_chunks("800B", shape=(500,), dtype="float64")
assert chunks == ((100, 100, 100, 100, 100),)
chunks = normalize_chunks("200B", shape=(10, 10), dtype="float64")
assert chunks == ((5, 5), (5, 5))
chunks = normalize_chunks((5, "200B"), shape=(10, 10), dtype="float64")
assert chunks == ((5, 5), (5, 5))
chunks = normalize_chunks("33B", shape=(10, 10), dtype="float64")
assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2))
chunks = normalize_chunks("1800B", shape=(10, 20, 30), dtype="float64")
assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6))
with pytest.raises(ValueError):
normalize_chunks("10B", shape=(10,), limit=20, dtype="float64")
with pytest.raises(ValueError):
normalize_chunks("100B", shape=(10, 10), limit=20, dtype="float64")
with pytest.raises(ValueError):
normalize_chunks(("100B", "10B"), shape=(10, 10), dtype="float64")
with pytest.raises(ValueError):
normalize_chunks(("10B", "10B"), shape=(10, 10), limit=20, dtype="float64")
def test_auto_chunks_h5py():
h5py = pytest.importorskip("h5py")
with tmpfile(".hdf5") as fn:
with h5py.File(fn, mode="a") as f:
d = f.create_dataset(
"/x", shape=(1000, 1000), chunks=(32, 64), dtype="float64"
)
d[:] = 1
with h5py.File(fn, mode="a") as f:
d = f["x"]
with dask.config.set({"array.chunk-size": "1 MiB"}):
x = da.from_array(d)
assert isinstance(x._meta, np.ndarray)
assert x.chunks == ((256, 256, 256, 232), (512, 488))
def test_no_warnings_from_blockwise():
    """Blockwise-based operations should complete without emitting warnings.

    Modernization: ``pytest.warns(None)`` was deprecated in pytest 6.2 and
    removed in pytest 8; the documented replacement for "assert no warnings"
    is ``warnings.catch_warnings(record=True)``.
    """
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        x = da.ones((3, 10, 10), chunks=(3, 2, 2))
        da.map_blocks(lambda y: np.mean(y, axis=0), x, dtype=x.dtype, drop_axis=0)
    assert not record
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        x = da.ones((15, 15), chunks=(5, 5))
        (x.dot(x.T + 1) - x.mean(axis=0)).std()
    assert not record
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        x = da.ones((1,), chunks=(1,))
        1 / x[0]
    assert not record
def test_from_array_meta():
sparse = pytest.importorskip("sparse")
x = np.ones(10)
meta = sparse.COO.from_numpy(x)
y = da.from_array(x, meta=meta)
assert isinstance(y._meta, sparse.COO)
def test_compute_chunk_sizes():
x = da.from_array(np.linspace(-1, 1, num=50), chunks=10)
y = x[x < 0]
assert np.isnan(y.shape[0])
assert y.chunks == ((np.nan,) * 5,)
z = y.compute_chunk_sizes()
assert y is z
assert z.chunks == ((10, 10, 5, 0, 0),)
assert len(z) == 25
# check that dtype of chunk dimensions is `int`
assert isinstance(z.chunks[0][0], int)
def test_compute_chunk_sizes_2d_array():
X = np.linspace(-1, 1, num=9 * 4).reshape(9, 4)
X = da.from_array(X, chunks=(3, 4))
idx = X.sum(axis=1) > 0
Y = X[idx]
# This is very similar to the DataFrame->Array conversion
assert np.isnan(Y.shape[0]) and Y.shape[1] == 4
assert Y.chunks == ((np.nan, np.nan, np.nan), (4,))
Z = Y.compute_chunk_sizes()
assert Y is Z
assert Z.chunks == ((0, 1, 3), (4,))
assert Z.shape == (4, 4)
def test_compute_chunk_sizes_3d_array(N=8):
X = np.linspace(-1, 2, num=8 * 8 * 8).reshape(8, 8, 8)
X = da.from_array(X, chunks=(4, 4, 4))
idx = X.sum(axis=0).sum(axis=0) > 0
Y = X[idx]
idx = X.sum(axis=1).sum(axis=1) < 0
Y = Y[:, idx]
idx = X.sum(axis=2).sum(axis=1) > 0.1
Y = Y[:, :, idx]
# Checking to make sure shapes are different on outputs
assert Y.compute().shape == (8, 3, 5)
assert X.compute().shape == (8, 8, 8)
assert Y.chunks == ((np.nan, np.nan),) * 3
assert all(np.isnan(s) for s in Y.shape)
Z = Y.compute_chunk_sizes()
assert Z is Y
assert Z.shape == (8, 3, 5)
assert Z.chunks == ((4, 4), (3, 0), (1, 4))
def _known(num=50):
return da.from_array(np.linspace(-1, 1, num=num), chunks=10)
@pytest.fixture()
def unknown():
x = _known()
y = x[x < 0]
assert y.chunks == ((np.nan,) * 5,)
return y
def test_compute_chunk_sizes_warning_fixes_rechunk(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
y.rechunk("auto")
y.compute_chunk_sizes()
y.rechunk("auto")
def test_compute_chunk_sizes_warning_fixes_to_zarr(unknown):
pytest.importorskip("zarr")
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
with StringIO() as f:
y.to_zarr(f)
y.compute_chunk_sizes()
with pytest.raises(ValueError, match="irregular chunking"):
with StringIO() as f:
y.to_zarr(f)
def test_compute_chunk_sizes_warning_fixes_to_svg(unknown):
y = unknown
with pytest.raises(NotImplementedError, match="compute_chunk_sizes"):
y.to_svg()
y.compute_chunk_sizes()
y.to_svg()
def test_compute_chunk_sizes_warning_fixes_concatenate():
x = _known(num=100).reshape(10, 10)
idx = x.sum(axis=0) > 0
y1 = x[idx]
y2 = x[idx]
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.concatenate((y1, y2), axis=1)
y1.compute_chunk_sizes()
y2.compute_chunk_sizes()
da.concatenate((y1, y2), axis=1)
def test_compute_chunk_sizes_warning_fixes_reduction(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.argmin(y)
y.compute_chunk_sizes()
da.argmin(y)
def test_compute_chunk_sizes_warning_fixes_reshape(unknown):
y = unknown
with pytest.raises(ValueError, match="compute_chunk_sizes"):
da.reshape(y, (5, 5))
y.compute_chunk_sizes()
da.reshape(y, (5, 5))
def test_compute_chunk_sizes_warning_fixes_slicing():
x = _known(num=100).reshape(10, 10)
y = x[x.sum(axis=0) < 0]
with pytest.raises(ValueError, match="compute_chunk_sizes"):
y[:3, :]
y.compute_chunk_sizes()
y[:3, :]
def test_rechunk_auto():
x = da.ones(10, chunks=(1,))
y = x.rechunk()
assert y.npartitions == 1
def test_map_blocks_series():
pd = pytest.importorskip("pandas")
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq as dd_assert_eq
x = da.ones(10, chunks=(5,))
s = x.map_blocks(pd.Series)
assert isinstance(s, dd.Series)
assert s.npartitions == x.npartitions
dd_assert_eq(s, s)
@pytest.mark.xfail(reason="need to remove singleton index dimension")
def test_map_blocks_dataframe():
pd = pytest.importorskip("pandas")
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq as dd_assert_eq
x = da.ones((10, 2), chunks=(5, 2))
s = x.map_blocks(pd.DataFrame)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == x.npartitions
dd_assert_eq(s, s)
def test_dask_layers():
a = da.ones(1)
assert a.dask.layers.keys() == {a.name}
assert a.dask.dependencies == {a.name: set()}
assert a.__dask_layers__() == (a.name,)
b = a + 1
assert b.dask.layers.keys() == {a.name, b.name}
assert b.dask.dependencies == {a.name: set(), b.name: {a.name}}
assert b.__dask_layers__() == (b.name,)
| 29.346897 | 116 | 0.553052 |
acdfd4ee05ae4d1fd754fdf34f9046ec9551bdd7 | 13,379 | py | Python | app/ImgProcessing.py | tamaUdon/web_OCR_app | 3fc08ed27b9e30406514f5d8c95c1a6722007890 | [
"MIT"
] | null | null | null | app/ImgProcessing.py | tamaUdon/web_OCR_app | 3fc08ed27b9e30406514f5d8c95c1a6722007890 | [
"MIT"
] | null | null | null | app/ImgProcessing.py | tamaUdon/web_OCR_app | 3fc08ed27b9e30406514f5d8c95c1a6722007890 | [
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import io
import tempfile
import requests
import sys
import tkinter
import time
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
from sklearn.mixture import BayesianGaussianMixture
import math
import pandas as pd
class ImgProcessing:
def get_resized_img(self, image):
(H, W) = image.shape[:2]
(newW, newH) = (512, 512)
rW = W / float(newW)
rH = H / float(newH)
# Image expand
if rW or rH > 1:
resized_img = cv2.resize(image, (newW, newH), interpolation = cv2.INTER_CUBIC)
# Image reduction
elif rW or rH < 1:
resized_img = cv2.resize(image, (newW, newH), interpolation = cv2.cv2.INTER_AREA)
else:
resized_img = image
cv2.imshow("resize", resized_img)
return resized_img
    def get_binarized_img(self,resized_im):
        """Binarize a BGR image with Gaussian adaptive thresholding.

        Converts to grayscale, then applies ``cv2.adaptiveThreshold`` with
        thresholdType=1 (THRESH_BINARY_INV, i.e. text becomes white on
        black), blockSize=21 and C=11 — values the original author marked
        as needing tuning.  As a side effect the result is written to
        ``alphabet-th.png`` in the current working directory.

        :param resized_im: BGR image (typically from :meth:`get_resized_img`)
        :return: the binarized single-channel image
        """
        gray = cv2.cvtColor(resized_im, cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 1, 21, 11) # NEED TO TUNE
        cv2.imwrite("alphabet-th.png", thresh)
        return thresh
# EAST detector
# Reference from <https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/>
    def get_detected_texts_coordinates(self, resize):
        """Detect text regions with the EAST deep-learning detector.

        Runs the frozen EAST model (``frozen_east_text_detection.pb``,
        expected in the working directory) over *resize*, decodes the
        score/geometry maps into axis-aligned boxes, suppresses overlaps,
        and returns the boxes sorted into rough reading order.

        :param resize: input image whose width/height are multiples of 32
            (EAST requirement — assumed satisfied by the 512x512 resize;
            TODO confirm for other sizes)
        :return: ``(index_boxes, W)`` where each entry of *index_boxes* is
            ``[index, startX, startY, endX, endY]`` with ``index`` a
            row-major ordering key, and ``W`` is the image width
        """
        (H, W) = resize.shape[:2]
        orig = resize.copy()
        # Output layers: per-pixel text confidence and box geometry.
        layerNames = [
            "feature_fusion/Conv_7/Sigmoid",
            "feature_fusion/concat_3"]
        print("[INFO] loading EAST text detector...")
        net = cv2.dnn.readNet("frozen_east_text_detection.pb")
        # Mean-subtraction values are the ImageNet means used by EAST.
        blob = cv2.dnn.blobFromImage(resize, 1.0, (W, H),
            (123.68, 116.78, 103.94), swapRB=True, crop=False)
        start = time.time()
        net.setInput(blob)
        (scores, geometry) = net.forward(layerNames)
        end = time.time()
        print("[INFO] text detection took {:.6f} seconds".format(end - start))
        (numRows, numCols) = scores.shape[2:4]
        rects = []
        confidences = []
        coordinates = []  # NOTE(review): unused; kept for byte-compatibility
        for y in range(0, numRows):
            # Slice out this row's confidences and box-geometry channels
            # (distances to the four box edges plus rotation angle).
            scoresData = scores[0, 0, y]
            xData0 = geometry[0, 0, y]
            xData1 = geometry[0, 1, y]
            xData2 = geometry[0, 2, y]
            xData3 = geometry[0, 3, y]
            anglesData = geometry[0, 4, y]
            min_confidence = 0.5
            for x in range(0, numCols):
                if scoresData[x] < min_confidence:
                    continue
                # Feature maps are 4x smaller than the input image.
                (offsetX, offsetY) = (x * 4.0, y * 4.0)
                angle = anglesData[x]
                cos = np.cos(angle)
                sin = np.sin(angle)
                h = xData0[x] + xData2[x]
                w = xData1[x] + xData3[x]
                # Rotate the edge distances back into image coordinates;
                # the +5 / -10 offsets pad the box slightly.
                endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x])) + 5
                endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x])) + 5
                startX = int(endX - w) - 10
                startY = int(endY - h) - 10
                rects.append((startX, startY, endX, endY))
                confidences.append(scoresData[x])
        # Collapse overlapping candidate boxes.
        boxes = non_max_suppression(np.array(rects), probs=confidences)
        # rectangle
        index_boxes = []
        for (startX, startY, endX, endY) in boxes:
            cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 1)
            # Row-major key: boxes lower on the page (larger endY) and
            # further right (larger startX) sort later.
            index = endY * W + startX
            index_boxes.append([index, startX, startY, endX, endY])
        # sort
        index_boxes = sorted(index_boxes, key=lambda x:x[0])
        print("index"), print(index_boxes)
        cv2.imwrite("text.png", orig)
        return index_boxes, W
# Extract words
def get_extract_line_im(self, rects, binarized_im, im_w):
    """Crop each detected word box out of *binarized_im*.

    rects        -- [index, x, y, endX, endY] boxes from the EAST stage.
    binarized_im -- thresholded page image the crops are taken from.
    im_w         -- page width, used to rebuild the reading-order index.

    Returns ``(unpacked_rects, word_pics)``: the re-indexed rectangles
    ``[index, x, y, w, h, lower_left_y, right_corner_x]`` and one square,
    zero-padded image per word.  Debug crops are written to disk.
    """
    unpacked_rects = []
    word_pics = []
    for i, r in enumerate(rects):
        index, x, y, endX, endY = list(r)
        h = endY - y
        w = endX - x
        lower_left_y = y + h
        right_corner_x = x + w
        # Quantize the baseline to the nearest 10 px so words on the same
        # physical line share the same row key in the sort index.
        y2 = round(lower_left_y / 10) * 10
        index = y2 * im_w + x
        unpacked_rects.append([index, x, y, w, h, lower_left_y, right_corner_x])
        trim_img = binarized_im[y:y + h, x:x + w]  # get ROI
        cv2.imwrite("save/dir/path" + str(i) + "-trim.png", trim_img)
        # Side of the square canvas: 1.2x the larger box dimension.
        # NOTE(review): .astype assumes w/h are numpy scalars (box values come
        # from non_max_suppression's array); plain Python ints would raise
        # AttributeError here -- confirm.
        ww = (round((w if w > h else h) * 1.2)).astype(np.int64)
        print(type(ww))
        print(type(y))
        spc = np.zeros((ww, ww))
        # Center the crop on the square canvas.
        wy = (ww - h) // 2
        wx = (ww - w) // 2
        spc[wy:wy + h, wx:wx + w] = trim_img  # get normalized image
        cv2.imwrite("save/dir/path" + str(i) + "-textz.png", spc)
        word_pics.append(spc)
    print("unpacked_rects")
    print(unpacked_rects)
    return unpacked_rects, word_pics
# get each character
def get_charImg(self, word_pics):
    """Split every word image into per-character 28x28 tensors.

    A zero "breakline" tensor is appended after each word so downstream
    decoding can tell where one word ends and the next begins.
    Returns a flat list of (1, 28, 28, 1) float32 arrays.
    """
    char_imgs = []
    # Sentinel inserted between words: all zeros, shaped and scaled exactly
    # like the normalized character tensors produced by _extract_chars_im.
    breakline = np.zeros((28, 28))
    breakline = breakline.reshape(-1, 28, 28, 1)
    breakline = breakline.astype("float32") / 255
    for i, spc in enumerate(word_pics):
        # findcontours
        # NOTE(review): [0] picks the contour list under OpenCV 4; OpenCV 3's
        # findContours returns (image, contours, hierarchy), so this index
        # would grab the wrong element there -- confirm the cv2 version.
        uint_im = np.uint8(spc)
        char_rects = cv2.findContours(uint_im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        # sort contours left-to-right by their x coordinate
        im_w = spc.shape[1]
        rcts = self._get_unpacked_rects(char_rects, im_w)
        line = sorted(rcts, key=lambda x: x[1])
        # integrate split glyphs such as 'i' and 'j'
        char_rects = self._get_integrated_rectangle(line)
        char_im = self._extract_chars_im(char_rects, spc)
        char_imgs.extend(char_im)
        # insert breakline
        char_imgs.append(breakline)
    return char_imgs
def _get_unpacked_rects(self, char_rects, im_w):
    """Turn cv2 contours into [index, x, y, w, h, bottom, right] records.

    The index quantizes the bottom edge to the nearest 10 px and combines
    it with x, giving a single row-major sort key for reading order.
    """
    unpacked = []
    for contour in char_rects:
        x, y, w, h = cv2.boundingRect(contour)
        bottom = y + h
        right = x + w
        quantized_row = round(bottom / 10) * 10
        sort_key = quantized_row * im_w + x
        unpacked.append([sort_key, x, y, w, h, bottom, right])
    print("rct")
    print(unpacked)
    return unpacked
# integrate split region (ex. 'i', 'j')
def _get_integrated_rectangle(self, corners):
    """Merge rectangles whose x-ranges overlap into one glyph box.

    Dots of 'i' / 'j' are detected as separate contours; when rectangle i
    spans the horizontal midpoint of rectangle i-1, the two are merged into
    entry i and entry i-1 is dropped.  Returns the (mutated) list.
    """
    x_region = []
    del_pt = []
    pt_list = self._tuple_to_list(corners)
    for i, r in enumerate(pt_list):
        md = None
        index, x, y, w, h, lower_left_y, right_corner = r
        x_region.append(x + w / 2)
        print("x_region")
        print(x_region)
        if i > 0:
            # md is the previous rectangle's horizontal midpoint.
            md = x_region[i - 1]
            print("md")
            print(md)
            if x <= md <= x + w:
                del_pt.append(i)
    if del_pt:
        print(del_pt)
        # Walk merge points from the end so a deletion does not disturb the
        # lower indices still to be processed.
        # NOTE(review): consecutive merge points (i, i+1) would still shift
        # after `del`; assumed not to occur in practice -- confirm.
        # NOTE(review): max() for the merged x/w looks suspicious -- a true
        # union of boxes would take min(x); kept as-is to preserve behavior.
        for dl in reversed(del_pt):
            pt_list[dl][1] = max(pt_list[dl - 1][1], pt_list[dl][1])  # x
            pt_list[dl][3] = max(pt_list[dl - 1][3], pt_list[dl][3])  # w
            pt_list[dl][6] = pt_list[dl][1] + pt_list[dl][3]  # x+w right_corner
            pt_list[dl][2] = min(pt_list[dl - 1][2], pt_list[dl][2])  # y
            pt_list[dl][5] = max(pt_list[dl - 1][5], pt_list[dl][5])  # lower_left_y
            pt_list[dl][4] = pt_list[dl][5] - pt_list[dl][2]  # h
            del pt_list[dl - 1]
    # (A dead no-op `tuple(pt_list)` whose result was discarded has been removed.)
    return pt_list
# tuple to list
def _tuple_to_list(self, corners):
    """Return *corners* with each tuple converted to a mutable list."""
    converted = [list(entry) for entry in corners]
    print("to_list")
    print(converted)
    return converted
# get each character image
def _extract_chars_im(self, corners, word_pic):
    """Crop, square-pad, resize and normalize one 28x28 tensor per box.

    corners  -- character rectangles [index, x, y, w, h, lower_left_y, right].
    word_pic -- the square word image the characters are cut from.
    Returns a list of (1, 28, 28, 1) float32 arrays scaled to [0, 1].
    """
    char_im = []
    # NOTE(review): the caller (get_charImg) already runs
    # _get_integrated_rectangle before calling this, so boxes may be merged
    # twice here -- confirm whether the second pass is intentional.
    corners = self._get_integrated_rectangle(corners)
    for i, cnt in enumerate(corners):
        index, x, y, w, h, lower_left_y, right_corner = cnt
        trim_img = word_pic[y:y + h, x:x + w]
        # Square canvas side: 1.2x the larger box dimension.
        ww = round((w if w > h else h) * 1.2)  # .astype(np.int64)
        zeros = np.zeros((ww, ww))
        # Center the crop on the square canvas.
        wy = (ww - h) // 2
        wx = (ww - w) // 2
        zeros[wy:wy + h, wx:wx + w] = trim_img
        # Resize and normalize
        trim_img = cv2.resize(zeros, (28, 28))
        cv2.imwrite("save/dir/path" + str(index) + "-normanilzed.png", trim_img)
        trim_img = trim_img.reshape(-1, 28, 28, 1)
        trim_img = trim_img.astype("float32") / 255
        char_im.append(trim_img)
    return char_im
#### Use when you need to rotate image ####
# def get_clusterized_coordinates(self,im):
#
# kp_array = np.empty((0,2), int)
#
# akaze = cv2.AKAZE_create() # AKAZE
# keypoints = akaze.detect(self.thresh)
#
# for marker in keypoints:
# akaze_im = cv2.drawMarker(im,
# tuple(int(i) for i in marker.pt),
# color=(0, 255, 0))
# cv2.imwrite("akaze.png", akaze_im)
# kp_array = np.concatenate([kp_array, np.array([marker.pt])], 0)
#
# #VBGMM
# bgmm = BayesianGaussianMixture(n_components=5,
# weight_concentration_prior_type='dirichlet_process')
# bgmm = bgmm.fit(kp_array)
# cluster = bgmm.predict(kp_array)
# cluster_count = len(bgmm.weights_)
# labeled_Pt_x = [[] for i in range(cluster_count)]
# labeled_Pt_y = [[] for i in range(cluster_count)]
#
# marker_pt = []
# cluster = cluster.tolist()
#
# for i in range(cluster_count):
# for marker in keypoints:
# marker_pt.append([int(n) for n in marker.pt if n != None])
# labeled_Pt_x[i] = [marker_pt[n][0] for n,label in enumerate(cluster) if i == label]
# labeled_Pt_y[i] = [marker_pt[n][1] for n,label in enumerate(cluster) if i == label]
#
# labeled_Pt_x = [l for l in labeled_Pt_x if l != None]
# labeled_Pt_y = [l for l in labeled_Pt_y if l != None]
# print(labeled_Pt_x)
# print(labeled_Pt_y)
#
# return labeled_Pt_x, labeled_Pt_y
#
#
# # rotate
# def get_rotatedImg_rects(self,labeled_Pt_x,labeled_Pt_y,im):
#
# gradients = []
#
# for i in range(len(labeled_Pt_x)):
# kp_x = np.array(labeled_Pt_x[i])
# kp_y = np.array(labeled_Pt_y[i])
#
# ones = np.array([kp_x,np.ones(len(kp_x))])
# ones = ones.T
# a,b = np.linalg.lstsq(ones,kp_y)[0]
# gradients.append(a)
# #max_gradient = gradients[np.argmax(np.abs(gradients))]
# max_gradient = gradients[int(np.mean(gradients))]
#
# rows,cols = self.thresh.shape
# d = math.degrees(max_gradient)
# print(d)
# M = cv2.getRotationMatrix2D((cols/2,rows/2),d,1)
# self.thresh = cv2.warpAffine(self.thresh,M,(cols,rows))
# cv2.imwrite("rotate.png", self.thresh)
#
# kernel = np.ones((6,6),np.uint8)
# line_text_img = cv2.dilate(self.thresh, kernel, iterations=1)
# cv2.imwrite("dilate.png", line_text_img)
#
# # Re-detect a bounding rectangle for each text line
# #rects = self._find_contours(line_text_img, im)
# #rects = sorted(rects, key=lambda x:x[0])
#
# return rects
#### draw histgram
# def get_hist_pt(self, hist_img, width, height, axis):
# histgram = []
#
# if axis == 'x':
# for i in range(width):
# px = hist_img[0:height, i]
# #print("px"), print(px)
# hist_n = [1 for x in px if (x == [255, 255, 255]).all()] #[1, 1, 1,...]
# histgram.append(len(hist_n))
#
# plt.bar(range(width), histgram, width=1.0)
# plt.ylim(0, height)
# plt.show()
#
# elif axis == 'y':
# for i in range(height):
# px = hist_img[i, 0:width]
# #print("px"), print(px)
# hist_n = [1 for x in px if (x == [255, 255, 255]).all()] #[1, 1, 1,...]
# histgram.append(len(hist_n))
# plt.bar(range(height), histgram, width=1.0)
# plt.ylim(0, width)
# plt.show()
#
# x_pt = [i for i,x in enumerate(histgram) if x != 0]
# y_pt = [i for i,y in enumerate(histgram) if y != 0]
# pt_abs = [a for i,a in enumerate(x_pt) if i > 0 and abs(a - x_pt[i-1]) > 1]
# print("abs")
# print(pt_abs)
#
# return pt_abs
| 35.582447 | 106 | 0.492264 |
acdfd5b629bb588c6beb18155e5adc649a021e80 | 7,739 | py | Python | src/m4_SimpleTurtle_objects.py | sandergl/01-IntroductionToPython | fcd028283bda4c6f6ca640bbb6e303b5b0e46777 | [
"MIT"
] | null | null | null | src/m4_SimpleTurtle_objects.py | sandergl/01-IntroductionToPython | fcd028283bda4c6f6ca640bbb6e303b5b0e46777 | [
"MIT"
] | null | null | null | src/m4_SimpleTurtle_objects.py | sandergl/01-IntroductionToPython | fcd028283bda4c6f6ca640bbb6e303b5b0e46777 | [
"MIT"
] | null | null | null | """
Demonstrates using OBJECTS via Turtle Graphics.
Concepts include:
-- CONSTRUCT an INSTANCE of a CLASS (we call such instances OBJECTS).
-- Make an object ** DO ** something by using a METHOD.
-- Reference an object's ** DATA ** by using an INSTANCE VARIABLE.
Also:
-- ASSIGNING a VALUE to a NAME (VARIABLE).
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Garrett Sanders.
"""
###############################################################################
#
# DONE: 1.
# Yes, that means for YOU to DO things per the following instructions:
#
# On Line 13 above, replace PUT_YOUR_OWN_NAME_HERE with your OWN name.
#
# BTW, the top block of text above forms what is called a DOC-STRING.
# It documents what this module does, in a way that exterior programs
# can make sense of. It has no other effect on this program.
#
###############################################################################
import rosegraphics as rg
###############################################################################
#
# DONE: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
# You will see that rosegraphics in the import statement above (line 28)
# is no longer marked as an error. You will do this
# in all projects that use rosegraphics, so get used to it. :)
#
# Once rosegraphics in the import statement is no longer marked as error,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
###############################################################################
#
# DONE: 3.
# Run this module. A window will pop up and Turtles will move around.
# After the Turtles stop moving, *click anywhere in the window to close it*.
#
# Then look at the code below. Raise your hand when you have questions about
# what the code is doing. Be sure that you understand the notations for:
# -- CONSTRUCTING an instance of a CLASS, e.g.
# rg.SimpleTurtle()
# -- ASSIGNING the resulting OBJECT (instance of a class) a NAME, e.g.
# natasha = rg.SimpleTurtle()
# -- Applying a METHOD to an object to make the object DO something, e.g.
# natasha.forward(100)
# -- Accessing an INSTANCE VARIABLE of an object, e.g.
# natasha.speed = 10
# boris.speed = natasha.speed
#
# After you are confident that you understand all the code below,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - CONSTRUCT (make and initialize) a TurtleWindow object for animation.
# The definition of a TurtleWindow is in the rg
# (shorthand for rosegraphics) module.
# -----------------------------------------------------------------------------
# Open the shared animation window; all turtles below draw into it.
window = rg.TurtleWindow()
window.delay(20)  # Bigger numbers mean slower animation.

# -----------------------------------------------------------------------------
# The next few lines show how to:
#   - CONSTRUCT (make) a SimpleTurtle object and ASSIGN a NAME to the object.
# -----------------------------------------------------------------------------
boris = rg.SimpleTurtle()

# -----------------------------------------------------------------------------
# The next few lines show how to:
#   - Ask the SimpleTurtle object to do things by applying METHODs to it.
#     The numbers in the parentheses are called ARGUMENTS.
# -----------------------------------------------------------------------------
boris.forward(100)
boris.left(90)     # left/right turn by degrees
boris.forward(200)

# -----------------------------------------------------------------------------
# The next few lines show how to:
#   - Construct a second SimpleTurtle,
#     set its pen and speed INSTANCE VARIABLES, and ask it to do things.
# -----------------------------------------------------------------------------
natasha = rg.SimpleTurtle('turtle')  # 'turtle' selects a turtle-shaped cursor.

natasha.pen = rg.Pen('red', 30)  # Second argument is the Pen's thickness
natasha.speed = 10  # Faster

natasha.backward(50)
natasha.speed = 5
natasha.right(90)
natasha.pen = rg.Pen('blue', 10)
natasha.forward(50)

natasha.speed = 1  # Now slower
natasha.go_to(rg.Point(-100, 200))  # Jump-draw to an absolute (x, y) point.
natasha.speed = 20
natasha.pen = rg.Pen('orange', 5)
natasha.go_to(rg.Point(300, -300))
###############################################################################
#
# DONE: 4.
# Add a few more lines of your own code to make one of the
# existing SimpleTurtles move some more and/or have different
# characteristics.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
###############################################################################
#
# DONE: 5.
# The above code CONSTRUCTS two SimpleTurtle objects
# and gives those objects NAMES:
# boris natasha
#
# Add code of your own that constructs another SimpleTurtle object,
# naming it whatever you want. Names cannot have spaces or special
# characters, but they can have digits and underscores, e.g.
# this_1_has
#
# STYLE RULE: Your names should always begin with a LOWER_CASE letter.
# So mary is OK but Mary is NOT OK.
#
# Then add more code that:
# -- Constructs a Pen object,
# -- assigns your SimpleTurtle's pen to the constructed Pen object, and
# -- makes your SimpleTurtle move around a bit.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
# A third turtle with its own Pen, per the TODO 5 exercise above.
frank = rg.SimpleTurtle()
frank.pen = rg.Pen('brown', 20)  # Second argument is the Pen's thickness
# Bug fix: this line previously set natasha.speed (a copy-paste slip from the
# example above); frank is the turtle being configured in this section.
frank.speed = 20  # Faster
frank.left(250)
frank.pen = rg.Pen('pink', 10)
frank.forward(100)
frank.speed = 5
frank.right(90)
frank.backward(100)
frank.speed = 1  # Now slower
frank.go_to(rg.Point(-100, 200))
frank.speed = 20
frank.pen = rg.Pen('purple', 12)
frank.go_to(rg.Point(-300, 300))
###############################################################################
#
# DONE: 6.
# Run one more time to be sure that all is still OK.
# Ensure that no blue bars on the scrollbar-thing to the right remain.
#
# Then COMMIT-and-PUSH your work as before:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.
#
# You can COMMIT-and-PUSH as often as you like. DO IT FREQUENTLY.
#
###############################################################################
# -----------------------------------------------------------------------------
# The next line keeps the window open until the user clicks in the window.
# Throughout this exercise, this close_on_mouse_click line should be the
# LAST line in the file.  DO NOT ADD CODE BELOW THIS LINE!
# -----------------------------------------------------------------------------
window.close_on_mouse_click()
| 39.085859 | 79 | 0.531464 |
acdfd5e468b4314406dcd948994cd3e53a1fb8a4 | 790 | py | Python | MyNQL/utils.py | livinter/MyNQL | cafe076a813b726e3e6310453295011ed01d2e26 | [
"MIT"
] | 5 | 2018-06-01T05:56:30.000Z | 2021-03-24T18:14:23.000Z | MyNQL/utils.py | livinter/MyNQL | cafe076a813b726e3e6310453295011ed01d2e26 | [
"MIT"
] | 6 | 2018-05-31T05:41:14.000Z | 2018-06-02T07:11:15.000Z | MyNQL/utils.py | livinter/MyNQL | cafe076a813b726e3e6310453295011ed01d2e26 | [
"MIT"
] | null | null | null | import networkx as nx
# In-memory stand-in for a real key-value database.
fake_db = {}


# replace this with your favourite database
# should be a key value store, the key may be converted to/from tuple and the data-field should be text
def fake_db_serializer(action, key, text):
    """Apply one graph mutation to the in-memory fake database.

    action -- "INSERT", "UPDATE" or "DELETE"; anything else is ignored.
    key    -- hashable key (a tuple that may contain integers or strings).
    text   -- serialized payload stored under *key*.
    """
    # the key is a tuple and can consist of integer or strings
    # print (action, key, text)
    # INSERT and UPDATE behave identically on a plain dict, so the two
    # previously-duplicated branches are merged.  (The former `global`
    # declaration was unnecessary: the dict is only mutated, never rebound.)
    if action in ("INSERT", "UPDATE"):
        fake_db[key] = text
    elif action == "DELETE":
        del fake_db[key]
# node-link like in the d3.js example http://bl.ocks.org/mbostock/4062045
# use this function to save/export the network as a .json file that can be embedded as a nice html
def save_node_link_data(G, file):
    """Write graph *G* to path *file* as d3.js-style node-link JSON."""
    import json
    # Use a context manager so the handle is flushed and closed; the original
    # `open(file, "w").write(...)` leaked the file object.
    with open(file, "w") as fh:
        fh.write(json.dumps(nx.node_link_data(G)))
acdfd6826d43565bee8b35fa7745895604a4a7c6 | 2,515 | py | Python | run_mypy.py | Socrats/Axelrod | 99ea594a373f684a25e80da2c8eccbcd194754b7 | [
"MIT"
] | null | null | null | run_mypy.py | Socrats/Axelrod | 99ea594a373f684a25e80da2c8eccbcd194754b7 | [
"MIT"
] | null | null | null | run_mypy.py | Socrats/Axelrod | 99ea594a373f684a25e80da2c8eccbcd194754b7 | [
"MIT"
] | null | null | null | import subprocess
import sys
modules = ["run_strategy_indexer.py",
"axelrod/action.py",
"axelrod/deterministic_cache.py",
"axelrod/ecosystem.py",
"axelrod/fingerprint.py",
"axelrod/game.py",
"axelrod/load_data_.py",
"axelrod/mock_player.py",
"axelrod/moran.py",
"axelrod/plot.py",
"axelrod/random_.py",
"axelrod/tournament.py",
"axelrod/strategies/adaptive.py",
"axelrod/strategies/alternator.py",
"axelrod/strategies/ann.py",
"axelrod/strategies/apavlov.py",
"axelrod/strategies/appeaser.py",
"axelrod/strategies/averagecopier.py",
"axelrod/strategies/axelrod_first.py",
"axelrod/strategies/axelrod_second.py",
"axelrod/strategies/backstabber.py",
"axelrod/strategies/better_and_better.py",
"axelrod/strategies/calculator.py",
"axelrod/strategies/cooperator.py",
"axelrod/strategies/cycler.py",
"axelrod/strategies/darwin.py",
"axelrod/strategies/defector.py",
"axelrod/strategies/forgiver.py",
"axelrod/strategies/geller.py",
"axelrod/strategies/gradualkiller.py",
"axelrod/strategies/grudger.py",
"axelrod/strategies/grumpy.py",
"axelrod/strategies/handshake.py",
"axelrod/strategies/hunter.py",
"axelrod/strategies/inverse.py",
"axelrod/strategies/mathematicalconstants.py",
"axelrod/strategies/memoryone.py",
"axelrod/strategies/memorytwo.py",
"axelrod/strategies/mindcontrol.py",
"axelrod/strategies/mindreader.py",
"axelrod/strategies/mutual.py",
"axelrod/strategies/negation.py",
"axelrod/strategies/oncebitten.py",
"axelrod/strategies/prober.py",
"axelrod/strategies/punisher.py",
"axelrod/strategies/qlearner.py",
"axelrod/strategies/rand.py",
"axelrod/strategies/titfortat.py",
"axelrod/strategies/hmm.py",
"axelrod/strategies/human.py",
"axelrod/strategies/finite_state_machines.py",
"axelrod/strategies/worse_and_worse.py"]
# Type-check every module separately and collect the mypy process return codes.
exit_codes = [
    subprocess.call(["mypy", "--ignore-missing-imports",
                     "--follow-imports", "skip", module])
    for module in modules
]

# Exit non-zero when any module failed: max(...) is the worst return code seen.
sys.exit(max(exit_codes))
| 40.564516 | 62 | 0.599205 |
acdfd68e28ca33584715451537864d6cb6bd5039 | 13,013 | py | Python | examples/advanced.py | blundeln/pylens | a73f02c1d752d98059816443f94d20f73a74c561 | [
"BSD-3-Clause"
] | 1 | 2017-11-05T09:09:48.000Z | 2017-11-05T09:09:48.000Z | examples/advanced.py | blundeln/pylens | a73f02c1d752d98059816443f94d20f73a74c561 | [
"BSD-3-Clause"
] | null | null | null | examples/advanced.py | blundeln/pylens | a73f02c1d752d98059816443f94d20f73a74c561 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2010-2011, Nick Blundell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Nick Blundell nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
# Author: Nick Blundell <blundeln [AT] gmail [DOT] com>
# Organisation: www.nickblundell.org.uk
#
# Description:
# Some tests that serve as more complex examples
#
from pylens import *
from pylens.debug import d # Like print(...)
def complex_class_test():
    """
    This is an example of how we could embedded lenses within classes to
    manipulate the widely used interfaces.conf file to configure network
    interfaces of a UNIX systems.

    Note that it does not aim to be complete, just a demonstration of how you
    could compose such a mapping.
    """
    INPUT = """iface eth0-home inet static
address 192.168.1.1
netmask 255.255.255.0
gateway 67.207.128.1
dns-nameservers 67.207.128.4 67.207.128.5
up flush-mail
auto lo eth0
# A comment
auto eth1
"""

    # First we define a class to represent the iface stanza.  I break it up a
    # little to make it clearer.
    class NetworkInterface(LensObject):
        # Some component lenses.
        indent = WS(" ")
        interface_attribute = KeyValue(indent + Keyword(additional_chars="_-", is_label=True) + WS(" ") + Until(NL(), type=str) + NL())

        # Put it all together.
        __lens__ = "iface" + WS(" ") + Keyword(additional_chars="_-", is_label=True) + WS(" ") + \
            Keyword(label="address_family") + WS(" ") + Keyword(label="method") + NL() + \
            ZeroOrMore(interface_attribute)

        def __init__(self, **kargs):
            """A simple constructor, which simply store keyword args as attributes."""
            # .items() rather than the Python-2-only .iteritems(), so this
            # example also runs under Python 3 (both return iterable pairs).
            for key, value in kargs.items():
                setattr(self, key, value)

        # Define label mappings, so labels such as "dns-nameservers" are mapped to and
        # from a valid python identifier such as "dns_nameservers" and can
        # therefore be manipulated as object attributes.
        def _map_label_to_identifier(self, label):
            return label.replace("-", "_")

        def _map_identifier_to_label(self, attribute_name):
            return attribute_name.replace("_", "-")

    # Now we can define a class to represent the whole configuration, such that
    # it will contain NetworkInterface objects, etc.
    class InterfaceConfiguration(LensObject):
        auto_lens = Group("auto" + WS(" ") + List(Keyword(additional_chars="_-", type=str), WS(" "), type=None) + WS("") + NL(), type=list, name="auto_lens")
        __lens__ = ZeroOrMore(NetworkInterface | auto_lens | HashComment() | BlankLine())

        # Define containers within this container.
        interfaces = Container(store_items_of_type=[NetworkInterface], type=dict)
        auto_interfaces = Container(store_items_from_lenses=[auto_lens], type=list)

    if True:
        test_description("GET InterfaceConfiguration")
        config = get(InterfaceConfiguration, INPUT)
        assert_equal(config.interfaces["eth0-home"].address, "192.168.1.1")
        assert_equal(config.auto_interfaces[0][1], "eth0")
        assert_equal(len(config.auto_interfaces), 2)

        test_description("PUT InterfaceConfiguration")
        config.interfaces["eth0-home"].netmask = "bananas"
        config.auto_interfaces[0].insert(1, "wlan2")
        output = put(config)
        assert_equal(output, """iface eth0-home inet static
address 192.168.1.1
gateway 67.207.128.1
dns-nameservers 67.207.128.4 67.207.128.5
up flush-mail
netmask bananas
auto lo wlan2 eth0
# A comment
auto eth1
""")

        test_description("CREATE InterfaceConfiguration")
        GlobalSettings.check_consumption = True
        interface = NetworkInterface(address_family="inet", method="static", dns_nameservers="1.2.3.4 1.2.3.5", netmask="255.255.255.0")
        interface.some_thing = "something or another"
        config = InterfaceConfiguration()
        config.interfaces = {"eth3": interface}
        config.auto_interfaces = [["eth0"], ["wlan2", "eth2"]]
        output = put(config)
        assert_equal(output, """iface eth3 inet static
dns-nameservers 1.2.3.4 1.2.3.5
some-thing something or another
netmask 255.255.255.0
auto eth0
auto wlan2 eth2
""")
def debctrl_test():
    """An example based on an example from the Augeas user guide.

    Builds a lens for Debian control files bottom-up, unit-testing each
    sub-lens along the way, then GETs the DEB_CTRL sample into python
    structures, edits them, and PUTs them back, asserting exact output.
    """
    # As a whole, this is a fairly complex lens, though as you work though it you
    # should see that the steps are fairly consistant.

    # This lens demonstrates the use of labels and the auto_list lens modifier. I
    # also use incremental testing throughout, which should help you to follow
    # it.

    DEB_CTRL = """Source: libconfig-model-perl
Section: perl
Maintainer: Debian Perl Group <pkg-perl-maintainers@xx>
Build-Depends: debhelper (>= 7.0.0),
perl-modules (>= 5.10) | libmodule-build-perl
Build-Depends-Indep: perl (>= 5.8.8-12), libcarp-assert-more-perl,
libconfig-tiny-perl, libexception-class-perl,
libparse-recdescent-perl (>= 1.90.0),
liblog-log4perl-perl (>= 1.11)
"""

    # We build up the lens starting with the easier parts, testing snippets as we go.
    # Recall, when we set is_label we imply the lens has type=str (i.e is a STORE
    # lens)
    simple_entry_label = Literal("Source", is_label=True) \
        | Literal("Section", is_label=True) \
        | Literal("Maintainer", is_label=True)

    #
    # Some general lenses for non-store artifacts of the string structure.
    #
    colon = WS("") + ":" + WS(" ", optional=True)
    comma_sep = WS("", indent_continuation=True) + "," + WS("\n ", indent_continuation=True)
    option_sep = WS(" ", indent_continuation=True, optional=True) + "|" + WS(" ", indent_continuation=True, optional=True)

    #
    # simple_entry lens
    #

    # We lazily use the Until lens here, but you could parse the value further if you liked.
    # Note, auto_list unwraps a list if there is a single item, for convenience.
    # It is useful when we wish to associated a single item with a labelled
    # group.
    simple_entry = Group(simple_entry_label + colon + Until(NewLine(), type=str) + NewLine(), type=list, auto_list=True)

    # Test the simple_entry lens
    got = simple_entry.get("Maintainer: Debian Perl Group <pkg-perl-maintainers@xx>\n")
    # Just to highlight the effect of auto_list on a list type lens.
    if simple_entry.options.auto_list:
        assert_equal(got, "Debian Perl Group <pkg-perl-maintainers@xx>")
    else:
        assert_equal(got, ["Debian Perl Group <pkg-perl-maintainers@xx>"])

    # An insight into how pylens stores meta data on items to assist storage.
    assert_equal(got._meta_data.label, "Maintainer")

    # Now try to PUT with the lens.
    # Notice how, since we are creating a new item with the lens, we must pass a
    # label to the lens, which is considered out-of-band of the item (i.e. it is
    # meta data).
    assert_equal(simple_entry.put("some value", label="Source"), "Source: some value\n")

    #
    # depends_entry lens
    #

    # Note the order of these: longest match first, since they share a prefix.
    depends_entry_label = Literal("Build-Depends-Indep", is_label=True) \
        | Literal("Build-Depends", is_label=True)

    # Here is an interesting lens, so let me explain it.
    # Each dependancy may be either a single application or a list of alternative
    # applications (separated by a '|'), so we use an List lens and set it as an
    # auto_list.
    # Since the application may have an optional version string, we store the application
    # info in a dict using labels for the app name and version string.
    package_options = List(
        Group(
            Word(alphanums + "-", init_chars=alphas, label="name") +
            Optional(WS(" ") + "(" + Until(")", label="version") + ")"),
            type=dict
        ),
        option_sep,
        auto_list=True,
        type=list,
    )

    got = package_options.get("perl-modules (>= 5.10) | libmodule-build-perl")
    assert_equal(got, [{"name":"perl-modules", "version":">= 5.10"}, {"name":"libmodule-build-perl"}])
    # Then test auto_list ensures the list is unwrapped for a single item.
    assert_equal(package_options.get("perl-modules (>= 5.10)"), {"name":"perl-modules", "version":">= 5.10"})
    assert_equal(package_options.put({"version":"3.4", "name":"some-app"}), "some-app (3.4)")
    assert_equal(package_options.put([{"version":"3.4", "name":"some-app"}, {"version":"< 1.2", "name":"another-app"}]), "some-app (3.4) | another-app (< 1.2)")

    # Now we wrap the package options in a comma separated list. Notice how we do
    # not set the type to list, since we wish these items to be stored in a higher
    # level list, to avoid excessive list nesting.
    depends_list = List(package_options, comma_sep)

    # It might be over the top, but let's make sure this part works too.
    # Note that, for the isolated test of this lens we must set a type on it,
    # otherwise the sub-lenses will have nothing in which to store their extracted
    # items.
    depends_list.type = list
    got = depends_list.get("""debhelper (>= 7.0.0) | cheese,\n \t perl-modules (>= 5.10) , libmodule-build-perl | monkey (1.2)""")
    assert_equal(got, [
        [{"name":"debhelper", "version":">= 7.0.0"}, {"name":"cheese"}],
        {"name":"perl-modules", "version":">= 5.10"},  # Not in list due to auto_list.
        [{"name":"libmodule-build-perl"}, {"name":"monkey", "version":"1.2"}],
    ])

    # Now lets try to PUT (actually CREATE a new) our abstract structure into a string.
    output = depends_list.put([
        [{"name":"beans", "version":">= 1.2"}, {"name":"eggs"}, {"name":"spam", "version":"<= 2.4"}],
        {"name":"cheese", "version":"3.3"},
    ])
    assert_equal(output, "beans (>= 1.2) | eggs | spam (<= 2.4),\n cheese (3.3)")

    # Remember to remove the type now that it has been tested in isolation.
    depends_list.type = None

    # Now put the dependancy entry togather.
    depends_entry = Group(depends_entry_label + colon + depends_list + WS("") + NewLine(), type=list)

    # And now we have our final lens.
    lens = Repeat(simple_entry | depends_entry, type=dict, alignment=SOURCE)

    # This names all of the lenses based on their variable names, to improve clarity of debug logs.
    auto_name_lenses(locals())

    # Now lets get the config file snippet as an abstract form we can easily
    # manipulate.
    got = lens.get(DEB_CTRL)

    # Now let's modify it a bit
    del got["Build-Depends"]
    # Lets insert some more dependancies.
    got["Build-Depends-Indep"].insert(2,
        [{"name":"cheese", "version":"3.3"}, {"name":"spam"}]
    )
    output = lens.put(got)

    # Now lets check the output.
    assert_equal(output, """Source: libconfig-model-perl
Section: perl
Maintainer: Debian Perl Group <pkg-perl-maintainers@xx>
Build-Depends-Indep: perl (>= 5.8.8-12), libcarp-assert-more-perl,
cheese (3.3) | spam, libconfig-tiny-perl,
libexception-class-perl,
libparse-recdescent-perl (>= 1.90.0),
liblog-log4perl-perl (>= 1.11)
""")

    # Now let's finish off by creating some output from scratch (i.e. using
    # default values of all non-store lenses rather than any original input.
    data = {
        "Source": "Just a simple entry",
        "Build-Depends-Indep": [
            [{"name":"cheese", "version":"1.2"}, {"name":"nbdebug"}],
            {"name":"someapp", "version":"<= 1.1"},
        ]
    }
    output = lens.put(data)
    assert_equal(output, """Source: Just a simple entry\nBuild-Depends-Indep: cheese (1.2) | nbdebug,\n someapp (<= 1.1)\n""")
# Useful for testing an installation.
if __name__ == "__main__":
    # Run both example tests; complex_class_test was previously defined in
    # this file but never invoked from here.
    complex_class_test()
    debctrl_test()
| 41.050473 | 158 | 0.672328 |
acdfd6ed3d38dcf41e7d172086b2238c0971d81d | 3,187 | py | Python | pinkfish/tests/test_trade.py | HaidongHe/pinkfish | e7d43744738e4e2b7ab77b048ba3dda35e8e21f5 | [
"MIT"
] | 1 | 2020-03-31T04:02:19.000Z | 2020-03-31T04:02:19.000Z | pinkfish/tests/test_trade.py | jimmyhzuk/pinkfish | d9bba22ba19a548b5ac65da63bfee40623c77cda | [
"MIT"
] | null | null | null | pinkfish/tests/test_trade.py | jimmyhzuk/pinkfish | d9bba22ba19a548b5ac65da63bfee40623c77cda | [
"MIT"
] | null | null | null | import datetime
import unittest
import pandas as pd
import pinkfish as pf
class TestTrade(unittest.TestCase):
    """Unit tests for pinkfish's trade bookkeeping (DailyBal / TradeLog)."""

    def setUp(self):
        # A fresh TradeLog and a fixed "today" for every test.
        self.trade_log = pf.trade.TradeLog()
        self.date = datetime.datetime.now().date()
        # keep all the prices at 10, shares and cash at 1000
        self.high = self.low = self.close = 10
        self.shares = self.cash = 1000
        self.state = pf.trade.TradeState.HOLD

    def call_append(self, bal):
        ''' We'll be running this several times. '''
        bal.append(self.date,
                   self.high, self.low, self.close,
                   self.shares, self.cash, self.state)

    def test_daily_balance_state(self):
        ''' For t to be instantiated the trade state must exist.
            Throw an error if the trade state is not valid.
        '''
        bal = pf.DailyBal()
        # None is not a valid TradeState; appending must raise.
        self.state = None
        with self.assertRaises(pf.trade.TradeStateError):
            self.call_append(bal)

    def test_daily_balance_append(self):
        ''' Check that the daily log grows as expected. '''
        bal = pf.DailyBal()
        df = bal.get_log()
        self.assertTrue(isinstance(df, pd.DataFrame))
        self.assertTrue(len(df.index) == 0)

        # Append one row per state and watch the log grow by one each time.
        self.state = pf.trade.TradeState.OPEN
        self.call_append(bal)
        df = bal.get_log()
        self.assertTrue(len(df.index) == 1)

        self.state = pf.trade.TradeState.HOLD
        self.call_append(bal)
        df = bal.get_log()
        self.assertTrue(len(df.index) == 2)

        self.state = pf.trade.TradeState.CLOSE
        self.call_append(bal)
        df = bal.get_log()
        self.assertTrue(len(df.index) == 3)

        price_columns = ["high", "low", "close"]
        other_columns = ["shares", "cash"]

        # we are keeping the high, low and close the same for this test
        # cash and shares are also kept equal
        portfolio_value = self.high * self.shares + self.cash

        # all prices are $x
        # NOTE(review): this asserts the price columns hold the *portfolio
        # value* (price*shares + cash), not the raw prices -- presumably
        # DailyBal.append stores valuations; confirm against its implementation.
        prices = list(set(df[price_columns].values.flatten().tolist()))
        self.assertTrue(len(prices) == 1)
        self.assertTrue(prices[0] == portfolio_value)

        # all other values (except state) are y
        others = list(set(df[other_columns].values.flatten().tolist()))
        self.assertTrue(len(others) == 1)
        self.assertTrue(others[0] == self.shares)

        # check the order of our states
        states = df["state"].values.tolist()
        self.assertTrue(states[0] == pf.trade.TradeState.OPEN)
        self.assertTrue(states[1] == pf.trade.TradeState.HOLD)
        self.assertTrue(states[2] == pf.trade.TradeState.CLOSE)

    def test_calc_shares(self):
        ''' Check we are getting the correct number of shares
            for some level of cash.
        '''
        # 11 cash at price 2 -> 5 whole shares with 1 cash left over.
        cash, price = 11, 2
        shares, cash = self.trade_log.calc_shares(cash, price)
        self.assertEqual(shares, 5)
        self.assertEqual(cash, 1)

    def test_calc_cash(self):
        ''' Calculate the cash on liquidation. '''
        # 1 starting cash + 10 shares * price 2 = 21.
        start_cash = 1
        shares = 10
        price = 2
        end_cash = self.trade_log.calc_cash(start_cash, price, shares)
        self.assertEqual(end_cash, 21)
| 34.268817 | 71 | 0.604644 |
acdfd77e6492b605a8424caf5f796d2f55d6e927 | 5,555 | py | Python | exabel_data_sdk/stubs/protoc_gen_openapiv2/options/annotations_pb2.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | exabel_data_sdk/stubs/protoc_gen_openapiv2/options/annotations_pb2.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | exabel_data_sdk/stubs/protoc_gen_openapiv2/options/annotations_pb2.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | """Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
from ...protoc_gen_openapiv2.options import openapiv2_pb2 as protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2
DESCRIPTOR = _descriptor.FileDescriptor(name='protoc_gen_openapiv2/options/annotations.proto', package='grpc.gateway.protoc_gen_openapiv2.options', syntax='proto3', serialized_options=b'ZFgithub.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options', create_key=_descriptor._internal_create_key, serialized_pb=b'\n.protoc_gen_openapiv2/options/annotations.proto\x12)grpc.gateway.protoc_gen_openapiv2.options\x1a google/protobuf/descriptor.proto\x1a,protoc_gen_openapiv2/options/openapiv2.proto:l\n\x11openapiv2_swagger\x12\x1c.google.protobuf.FileOptions\x18\x92\x08 \x01(\x0b22.grpc.gateway.protoc_gen_openapiv2.options.Swagger:r\n\x13openapiv2_operation\x12\x1e.google.protobuf.MethodOptions\x18\x92\x08 \x01(\x0b24.grpc.gateway.protoc_gen_openapiv2.options.Operation:m\n\x10openapiv2_schema\x12\x1f.google.protobuf.MessageOptions\x18\x92\x08 \x01(\x0b21.grpc.gateway.protoc_gen_openapiv2.options.Schema:g\n\ropenapiv2_tag\x12\x1f.google.protobuf.ServiceOptions\x18\x92\x08 \x01(\x0b2..grpc.gateway.protoc_gen_openapiv2.options.Tag:n\n\x0fopenapiv2_field\x12\x1d.google.protobuf.FieldOptions\x18\x92\x08 \x01(\x0b25.grpc.gateway.protoc_gen_openapiv2.options.JSONSchemaBHZFgithub.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/optionsb\x06proto3', dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR, protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2.DESCRIPTOR])
OPENAPIV2_SWAGGER_FIELD_NUMBER = 1042
openapiv2_swagger = _descriptor.FieldDescriptor(name='openapiv2_swagger', full_name='grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger', index=0, number=1042, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
OPENAPIV2_OPERATION_FIELD_NUMBER = 1042
openapiv2_operation = _descriptor.FieldDescriptor(name='openapiv2_operation', full_name='grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation', index=1, number=1042, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
OPENAPIV2_SCHEMA_FIELD_NUMBER = 1042
openapiv2_schema = _descriptor.FieldDescriptor(name='openapiv2_schema', full_name='grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema', index=2, number=1042, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
OPENAPIV2_TAG_FIELD_NUMBER = 1042
openapiv2_tag = _descriptor.FieldDescriptor(name='openapiv2_tag', full_name='grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag', index=3, number=1042, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
OPENAPIV2_FIELD_FIELD_NUMBER = 1042
openapiv2_field = _descriptor.FieldDescriptor(name='openapiv2_field', full_name='grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field', index=4, number=1042, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DESCRIPTOR.extensions_by_name['openapiv2_swagger'] = openapiv2_swagger
DESCRIPTOR.extensions_by_name['openapiv2_operation'] = openapiv2_operation
DESCRIPTOR.extensions_by_name['openapiv2_schema'] = openapiv2_schema
DESCRIPTOR.extensions_by_name['openapiv2_tag'] = openapiv2_tag
DESCRIPTOR.extensions_by_name['openapiv2_field'] = openapiv2_field
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
openapiv2_swagger.message_type = protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2._SWAGGER
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(openapiv2_swagger)
openapiv2_operation.message_type = protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2._OPERATION
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(openapiv2_operation)
openapiv2_schema.message_type = protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2._SCHEMA
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(openapiv2_schema)
openapiv2_tag.message_type = protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2._TAG
google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(openapiv2_tag)
openapiv2_field.message_type = protoc__gen__openapiv2_dot_options_dot_openapiv2__pb2._JSONSCHEMA
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(openapiv2_field)
DESCRIPTOR._options = None | 154.305556 | 1,400 | 0.872727 |
acdfd79c2cbe8a3dc78bc71e645941ea92c7f0ef | 1,707 | py | Python | scale/ingest/scan/scanners/s3_scanner.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/ingest/scan/scanners/s3_scanner.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/ingest/scan/scanners/s3_scanner.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | """Defines a scanner that scans an S3 bucket backed workspace for files"""
from __future__ import unicode_literals
import logging
from ingest.scan.scanners.scanner import Scanner
logger = logging.getLogger(__name__)
class S3Scanner(Scanner):
"""A scanner for an S3 bucket backed workspace
"""
def __init__(self):
"""Constructor
"""
super(S3Scanner, self).__init__('s3', ['s3'])
def load_configuration(self, configuration):
"""See :meth:`ingest.scan.scanners.scanner.Scanner.load_configuration`
"""
# Nothing to do as all configuration is done at workspace broker level.
pass
def validate_configuration(self, configuration):
"""See :meth:`ingest.scan.scanners.scanner.Scanner.validate_configuration`
"""
# No configuration is required for S3 scanner as everything is provided
# by way of the workspace configurations.
return []
def _ingest_file(self, file_name, file_size):
"""Initiates ingest for a single S3 object
:param file_name: S3 object key
:type file_name: string
:param file_size: object size in bytes
:type file_size: int
:returns: Ingest model prepped for bulk create
:rtype: :class:`ingest.models.Ingest`
"""
ingest = None
if self._dry_run:
logger.info("Scan detected S3 object in workspace '%s': %s" % (self._scanned_workspace.name, file_name))
else:
ingest = self._process_ingest(file_name, file_size)
logger.info("Scan processed S3 object from workspace '%s': %s" % (self._scanned_workspace.name, file_name))
return ingest
| 29.947368 | 119 | 0.654364 |
acdfd86efabc5f5841478acd39e360e64a534877 | 3,112 | py | Python | AutoSketcher/models/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 4,551 | 2020-09-29T14:50:03.000Z | 2022-03-31T00:40:45.000Z | AutoSketcher/models/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 28 | 2020-10-01T08:03:23.000Z | 2022-03-30T15:40:40.000Z | AutoSketcher/models/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 809 | 2020-10-01T05:34:58.000Z | 2022-03-31T00:40:48.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct models
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
# Save pytorch-models
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch models to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch models.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI models. \n"
"This specifies the models architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 42.630137 | 118 | 0.63464 |
acdfd8d247212942d256e837b1e073bfded0bdd6 | 1,457 | py | Python | setup.py | amacd31/gr2m | 92ebcb4019e192aa01b736879d2ef24e5e4d037a | [
"BSD-3-Clause"
] | 3 | 2018-06-05T01:43:08.000Z | 2020-09-01T15:24:48.000Z | setup.py | amacd31/gr2m | 92ebcb4019e192aa01b736879d2ef24e5e4d037a | [
"BSD-3-Clause"
] | null | null | null | setup.py | amacd31/gr2m | 92ebcb4019e192aa01b736879d2ef24e5e4d037a | [
"BSD-3-Clause"
] | 2 | 2019-09-22T05:41:24.000Z | 2022-03-16T13:15:08.000Z | import os
from io import open
import versioneer
from setuptools import setup
try:
from Cython.Build import cythonize
ext_modules = cythonize(["gr2m/gr2m.py"])
except:
ext_modules = []
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gr2m',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Python implementation of the GR2M hydrologic rainfall-runoff model.',
long_description=long_description,
author='Andrew MacDonald',
author_email='andrew@maccas.net',
license='BSD',
url='https://github.com/amacd31/gr2m',
packages = ['gr2m'],
ext_modules = ext_modules,
test_suite = 'nose.collector',
test_requires = [
'numpy',
'pandas',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 29.734694 | 86 | 0.635553 |
acdfda47c1601b4c095e295cd5a2e60f40aebe43 | 850 | py | Python | setup.py | marnixlenoble/spotlight | 6052f490963ecb4172131eb2e17d4804ed757de5 | [
"MIT"
] | null | null | null | setup.py | marnixlenoble/spotlight | 6052f490963ecb4172131eb2e17d4804ed757de5 | [
"MIT"
] | null | null | null | setup.py | marnixlenoble/spotlight | 6052f490963ecb4172131eb2e17d4804ed757de5 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from collections import OrderedDict
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="spotlight",
version="2.0.1",
author="Michiel Doesburg",
author_email="michiel@moddix.com",
description="Laravel style data validation for Python.",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="spotlight validation validate",
url="https://github.com/mdoesburg/spotlight",
project_urls=OrderedDict(
(
("Documentation", "https://github.com/mdoesburg/spotlight"),
("Code", "https://github.com/mdoesburg/spotlight"),
)
),
license="MIT",
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
install_requires=[],
)
| 29.310345 | 72 | 0.669412 |
acdfda9b61f62189d1a6c5a9134399fd893669d8 | 1,979 | py | Python | config/settings/test.py | bongbongco/Kagong | 4efd0c98bb6b3f5465b46176591ab9882580decf | [
"MIT"
] | null | null | null | config/settings/test.py | bongbongco/Kagong | 4efd0c98bb6b3f5465b46176591ab9882580decf | [
"MIT"
] | 15 | 2020-06-05T17:01:28.000Z | 2022-03-11T23:26:07.000Z | config/settings/test.py | bongbongco/Kagong | 4efd0c98bb6b3f5465b46176591ab9882580decf | [
"MIT"
] | null | null | null | """
Test settings for kagong project.
- Used to run tests fast on the continuous integration server and locally
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
# Turn debug off so tests run faster
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = False
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='vJfTf8PTmkkopABLAnPhH0AOKogLrO7AIO3QL64EloL3GHHynq')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# In-memory email backend stores messages in django.core.mail.outbox
# for unit testing purposes
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# CACHING
# ------------------------------------------------------------------------------
# Speed advantages of in-memory caching without having to run Memcached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# PASSWORD HASHING
# ------------------------------------------------------------------------------
# Use fast password hasher so tests run faster
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# TEMPLATE LOADERS
# ------------------------------------------------------------------------------
# Keep templates in memory so tests run faster
TEMPLATES[0]['OPTIONS']['loaders'] = [
['django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
], ],
]
| 31.919355 | 99 | 0.521981 |
acdfdaffcebd80540689ae940faae2ad776cd99d | 3,117 | py | Python | meiduo_mall/meiduo_mall/apps/contents/generate_index.py | Nicholas-violet/meiduo_project_preview | 3c17fa45d11a8889710ca7a522ac047bb99b21b8 | [
"MIT"
] | 2 | 2020-06-19T11:53:02.000Z | 2020-06-24T06:25:00.000Z | meiduo_mall/meiduo_mall/apps/contents/generate_index.py | Nicholas-violet/meiduo_project_preview | 3c17fa45d11a8889710ca7a522ac047bb99b21b8 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/contents/generate_index.py | Nicholas-violet/meiduo_project_preview | 3c17fa45d11a8889710ca7a522ac047bb99b21b8 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from django.conf import settings
from django.template import loader
import os
import time
from goods.models import GoodsChannel, GoodsCategory
from contents.models import ContentCategory, Content
# 添加生成 html 文件的函数:
def generate_static_index_html():
# =====================生成上面字典格式数据=======================
# 第一部分: 从数据库中取数据:
# 定义一个有序字典对象
categories = OrderedDict()
# 对 GoodsChannel 进行 group_id 和 sequence 排序, 获取排序后的结果:
channels = GoodsChannel.objects.order_by('group_id',
'sequence')
# 遍历排序后的结果: 得到所有的一级菜单( 即,频道 )
for channel in channels:
# 从频道中得到当前的 组id
group_id = channel.group_id
# 判断: 如果当前 组id 不在我们的有序字典中:
if group_id not in categories:
# 我们就把 组id 添加到 有序字典中
# 并且作为 key值, value值 是 {'channels': [], 'sub_cats': []}
categories[group_id] = {
'channels': [],
'sub_cats': []
}
# 获取当前频道的分类名称
cat1 = channel.category
# 给刚刚创建的字典中, 追加具体信息:
# 即, 给'channels' 后面的 [] 里面添加如下的信息:
categories[group_id]['channels'].append({
'id': cat1.id,
'name': cat1.name,
'url': channel.url
})
# 根据 cat1 的外键反向, 获取下一级(二级菜单)的所有分类数据, 并遍历:
cat2s = GoodsCategory.objects.filter(parent=cat1)
# cat1.goodscategory_set.all()
for cat2 in cat2s:
# 创建一个新的列表:
cat2.sub_cats = []
cat3s = GoodsCategory.objects.filter(parent=cat2)
# 根据 cat2 的外键反向, 获取下一级(三级菜单)的所有分类数据, 并遍历:
for cat3 in cat3s:
# 拼接新的列表: key: 二级菜单名称, value: 三级菜单组成的列表
cat2.sub_cats.append(cat3)
# 所有内容在增加到 一级菜单生成的 有序字典中去:
categories[group_id]['sub_cats'].append(cat2)
# =====================生成首页广告部分数据=======================
# 我们定义一个字典, 里面将要存储广告内容部分:
contents = {}
# 从 ContentCategory 模型类中获取所有数据, 存放到 content_categories 中:
content_categories = ContentCategory.objects.all()
# 遍历刚刚获取的所有数据: 拿到每一个广告分类 cat:
for cat in content_categories:
# 根据广告分类的 外键反向
# 获取广告内容中状态为 True 并且按 sequence 排序的部分,
# 赋值给上面定义的字典, 快捷键(cat.key) 作为 key, 排序的部分作为value
contents[cat.key] = Content.objects.filter(category=cat,
status=True).order_by('sequence')
# 第二部分: 模板渲染部分:
# 把上面两部分获取的有序字典和字典作为变量,拼接新的字典 context
context = {
'categories': categories,
'contents': contents
}
# =====================获取模板,把数据添加进去生成页面====================
# 根据导入的 loader 获取 'index.html' 模板
template = loader.get_template('index.html')
# 拿到模板, 然后将 context 渲染到模板中, 生成渲染过的模板
html_text = template.render(context)
# 我们拼接新的 index.html 模板将要生成的所在地地址:
file_path = os.path.join(settings.GENERATED_STATIC_HTML_FILES_DIR, 'index.html')
# 以写的权限,将渲染过的模板重新生成, 写入到文件中.
with open(file_path, 'w', encoding='utf-8') as f:
f.write(html_text) | 33.880435 | 84 | 0.561116 |
acdfdb0ba08b13ec87bbb922e7c428d85b4312d6 | 27,688 | py | Python | src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/aio/operations/_security_partner_providers_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/aio/operations/_security_partner_providers_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/aio/operations/_security_partner_providers_operations.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecurityPartnerProvidersOperations:
"""SecurityPartnerProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
security_partner_provider_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
security_partner_provider_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Security Partner Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
async def get(
self,
resource_group_name: str,
security_partner_provider_name: str,
**kwargs
) -> "_models.SecurityPartnerProvider":
"""Gets the specified Security Partner Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityPartnerProvider, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProvider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProvider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.SecurityPartnerProvider",
    **kwargs
) -> "_models.SecurityPartnerProvider":
    """Issue the initial PUT of the create-or-update long-running operation.

    :param resource_group_name: The name of the resource group.
    :param security_partner_provider_name: The name of the Security Partner Provider.
    :param parameters: The SecurityPartnerProvider payload to create or update.
    :return: The deserialized SecurityPartnerProvider, or the result of the
        ``cls`` callback if one was supplied via kwargs.
    :raises ~azure.core.exceptions.HttpResponseError: on any non-200/201 status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    # Standard ARM status-code map; callers may layer their own overrides on top.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the operation's URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._create_or_update_initial.metadata['url'],  # type: ignore
        **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content_kwargs = {
        'content': self._serialize.body(parameters, 'SecurityPartnerProvider'),
    }  # type: Dict[str, Any]

    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 201):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 (updated) and 201 (created) carry the same response schema.
    deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.SecurityPartnerProvider",
    **kwargs
) -> AsyncLROPoller["_models.SecurityPartnerProvider"]:
    """Creates or updates the specified Security Partner Provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param security_partner_provider_name: The name of the Security Partner Provider.
    :type security_partner_provider_name: str
    :param parameters: Parameters supplied to the create or update Security Partner Provider
     operation.
    :type parameters: ~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProvider
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either SecurityPartnerProvider or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProvider]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # The initial PUT is only issued when starting fresh; a continuation token
    # means the operation is being resumed from a saved poller state, in which
    # case `raw_result` is intentionally never defined (see the branch below).
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            security_partner_provider_name=security_partner_provider_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not leak into
    # the polling method's constructor.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialization callback invoked by the poller on the terminal
        # response; applies the caller's `cls` hook if one was given.
        deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Select the polling strategy: default ARM polling, no-op polling, or a
    # caller-provided polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.TagsObject",
    **kwargs
) -> "_models.SecurityPartnerProvider":
    """Updates tags of a Security Partner Provider resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param security_partner_provider_name: The name of the Security Partner Provider.
    :type security_partner_provider_name: str
    :param parameters: Parameters supplied to update Security Partner Provider tags.
    :type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecurityPartnerProvider, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProvider
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    # Standard ARM status-code map; callers may layer their own overrides on top.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the operation's URL template with the serialized path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self.update_tags.metadata['url'],  # type: ignore
        **path_format_arguments)

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content_kwargs = {
        'content': self._serialize.body(parameters, 'TagsObject'),
    }  # type: Dict[str, Any]

    # Tags are patched in place; only a 200 response is expected.
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.SecurityPartnerProviderListResult"]:
    """Lists all Security Partner Providers in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecurityPartnerProviderListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProviderListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProviderListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET for one page: the first page uses the operation's URL
        # template; later pages follow the service-provided `next_link`
        # verbatim (it already embeds the query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Turn one page response into (continuation token, async item list).
        deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page and surface HTTP-level failures before extraction.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders'}  # type: ignore
def list(
    self,
    **kwargs
) -> AsyncIterable["_models.SecurityPartnerProviderListResult"]:
    """Gets all the Security Partner Providers in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecurityPartnerProviderListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.SecurityPartnerProviderListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProviderListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET for one page: the first page uses the operation's URL
        # template; later pages follow the service-provided `next_link`
        # verbatim (it already embeds the query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Turn one page response into (continuation token, async item list).
        deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page and surface HTTP-level failures before extraction.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/securityPartnerProviders'}  # type: ignore
| 51.084871 | 217 | 0.680403 |
acdfdbb3788b570075ba8ea1ec6ff6fb1290f4ca | 56,118 | py | Python | tensorflow/python/distribute/mirrored_strategy_test.py | EricLi404/tensorflow | 23759800d89f7b5362c338d9a3fd72a6810c3e22 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/distribute/mirrored_strategy_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 88 | 2020-11-24T08:18:10.000Z | 2022-03-25T20:28:30.000Z | tensorflow/python/distribute/mirrored_strategy_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus,
        ],
        mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.TwoDeviceDistributionTestBase,
    parameterized.TestCase):
  """Tests MirroredStrategy behavior with exactly two replicas.

  Each test is parameterized over a CPU+GPU and a two-GPU mirrored strategy,
  in both graph and eager mode. Most assertions are delegated to the shared
  helpers on the `strategy_test_lib` base classes.
  """

  def testMinimizeLoss(self, distribution):
    # Graph and eager mode exercise different code paths in the base helpers.
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)

  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)

  def testNumReplicasInSync(self, distribution):
    self.assertEqual(2, distribution.num_replicas_in_sync)

  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)

  def testRunRegroupError(self, distribution):

    def run_fn():
      replica_id = int(self.evaluate(_replica_id()))
      # Generates a list with different lengths on different devices.
      # Will fail in _regroup() (if more than one device).
      return list(range(replica_id))

    with distribution.scope(), self.assertRaises(AssertionError):
      distribution.extended.call_for_each_replica(run_fn)

  def testReduceToCpu(self, distribution):
    # Per-replica ids 0..n-1 summed across replicas.
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(_replica_id)
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=None)
      expected = sum(range(distribution.num_replicas_in_sync))
      self.assertEqual(expected, self.evaluate(reduced))

  def reduce_axis_helper(self, distribution, replica_squared_fn):
    """Runs `replica_squared_fn` per replica and checks axis-0 SUM and MEAN.

    `replica_squared_fn` is expected to return, on replica x, a tensor of
    x+1 copies of the value x, so the cross-replica sum is sum(x*(x+1)).
    """
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      result = distribution.extended.call_for_each_replica(replica_squared_fn)

      # sum
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=0)
      expected = sum(x * (x + 1) for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)

      # mean
      reduced = distribution.reduce(reduce_util.ReduceOp.MEAN, result, axis=0)
      expected /= sum(x + 1 for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)

  def testReduceAxisToCpu(self, distribution):
    for dtype in (dtypes.float32, dtypes.int32):

      def replica_squared_fn(dtype=dtype):
        # Lists with different lengths on different replicas.
        replica_id = _replica_id_as_int()
        return array_ops.identity(
            math_ops.cast([replica_id] * (replica_id + 1), dtype))

      self.reduce_axis_helper(distribution, replica_squared_fn)

  def set_v2_tensorshape(self, v2):
    # Helper to toggle between TF1/TF2 TensorShape semantics globally.
    if v2:
      tensor_shape.enable_v2_tensorshape()
    else:
      tensor_shape.disable_v2_tensorshape()

  def testReduceAxisToCpuUnknownShape(self, distribution):
    # Save and restore the global TensorShape-v2 override so this test does
    # not leak state into other tests.
    original_v2 = tensor_shape._TENSORSHAPE_V2_OVERRIDE  # pylint: disable=protected-access
    try:
      for v2 in (False, True):
        self.set_v2_tensorshape(v2)
        for dtype in (dtypes.float32, dtypes.int32):
          for shape in ((None,), None):  # Test both unknown size and rank.

            def replica_squared_fn(dtype=dtype, shape=shape):
              # Lists with different lengths on different replicas.
              replica_id = _replica_id_as_int()
              tensor = math_ops.cast([replica_id] * (replica_id + 1), dtype)
              # Erase shape information
              return array_ops.placeholder_with_default(tensor, shape=shape)

            self.reduce_axis_helper(distribution, replica_squared_fn)
    finally:
      self.set_v2_tensorshape(original_v2)

  def testReplicateDataset(self, distribution):
    if tf2.enabled() and not context.executing_eagerly():
      self.skipTest("Skipping test since we do not support graph mode in TF 2")

    # With 2 replicas, consecutive pairs of elements land on the replicas.
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    self._test_input_fn_iterable(distribution, input_fn, expected_values)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i, i+1] for i in range(0, 10, 2)]

    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values)

  def testMakeInputFnIteratorWithCallable(self, distribution):

    def fn():
      # Two interleaved copies of range(10); order across replicas is not
      # guaranteed, hence ignore_order below.
      dataset = dataset_ops.Dataset.range(2).interleave(
          (lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next

    expected_values = [[i, i] for i in range(0, 10)]

    input_fn = self._input_fn_to_test_input_context(
        fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values, test_reinitialize=False,
                                 ignore_order=True)

  def testNumpyDataset(self, distribution):
    self._test_numpy_dataset(distribution)

  def testGlobalStepUpdate(self, distribution):
    self._test_global_step_update(distribution)

  def testRun(self, distribution):
    self._test_run(distribution)

  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)

  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)

  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)

  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)

  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)

  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)

  def testSummaryForReplicaZeroOnly(self, distribution):
    self._test_summary_for_replica_zero_only(distribution)

  def testTrainableVariables(self, distribution):
    self._test_trainable_variable(distribution)

  def test_prefetch_to_device_dataset(self, distribution):
    # With prefetching enabled, each per-replica tensor should live on the
    # corresponding worker device.
    input_options = distribute_lib.InputOptions(
        experimental_prefetch_to_device=True)
    dataset = dataset_ops.Dataset.range(100)
    dataset = dataset.batch(distribution.num_replicas_in_sync)
    dataset = distribution.experimental_distribute_dataset(
        dataset, options=input_options)
    if context.executing_eagerly():
      item = next(iter(dataset))
    else:
      if isinstance(dataset, input_lib.DistributedDatasetV1):
        item = dataset.make_initializable_iterator().get_next()
      else:
        self.skipTest("unsupported test combination")
    device_types = [
        tf_device.DeviceSpec.from_string(tensor.device).device_type for
        tensor in item.values]
    expected_device_types = [
        tf_device.DeviceSpec.from_string(device).device_type for
        device in distribution.extended.worker_devices]
    self.assertAllEqual(device_types, expected_device_types)

  def test_prefetch_to_host_dataset(self, distribution):
    # With prefetching disabled, all per-replica tensors should stay on CPU.
    input_options = distribute_lib.InputOptions(
        experimental_prefetch_to_device=False)
    dataset = dataset_ops.Dataset.range(100)
    dataset = dataset.batch(distribution.num_replicas_in_sync)
    dataset = distribution.experimental_distribute_dataset(
        dataset, options=input_options)
    if context.executing_eagerly():
      item = next(iter(dataset))
    else:
      if isinstance(dataset, input_lib.DistributedDatasetV1):
        item = dataset.make_initializable_iterator().get_next()
      else:
        self.skipTest("unsupported test combination")
    device_types = {
        tf_device.DeviceSpec.from_string(tensor.device).device_type for
        tensor in item.values}
    self.assertAllEqual(list(device_types), ["CPU"])
def one_device_combinations():
  """Returns test combinations for single-device MirroredStrategy variants."""
  single_device_strategies = [
      strategy_combinations.mirrored_strategy_with_one_cpu,
      strategy_combinations.mirrored_strategy_with_one_gpu,
  ]
  return combinations.combine(
      distribution=single_device_strategies, mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.OneDeviceDistributionTestBase,
    parameterized.TestCase):
  """Tests MirroredStrategy behavior with a single replica (one CPU or GPU).

  All assertions are delegated to shared helpers on the `strategy_test_lib`
  base classes.
  """

  def testMinimizeLoss(self, distribution):
    # Graph and eager mode exercise different code paths in the base helpers.
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)

  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)

  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)

  def testRun(self, distribution):
    self._test_run(distribution)

  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)

  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)

  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)

  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)

  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)

  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
    test.TestCase, parameterized.TestCase):
  """Verifies variable-creator stacks are isolated per replica thread."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=["graph"]))
  def testCreatorStacksAreThreadLocal(self, distribution):

    def model_fn():
      replica_id_str = str(self.evaluate(_replica_id()))

      def thread_creator_fn(next_creator, **kwargs):
        # Tag the created value with this replica thread's id so we can
        # observe which creator stack each thread used.
        return next_creator(**kwargs) + ":thread_" + replica_id_str

      with variable_scope.variable_creator_scope(thread_creator_fn):
        # Create a variable in this scope.
        v = variable_scope.variable(1.0)

        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    def main_thread_creator(next_creator, **kwargs):
      # We are not using the underlying next_creator for test purposes.
      del next_creator, kwargs
      return "main_thread"

    with context.graph_mode(), \
        distribution.scope(), \
        variable_scope.variable_creator_scope(main_thread_creator):
      result = distribution.extended.call_for_each_replica(model_fn)
      result = distribution.experimental_local_results(result)
      # Each replica thread composed its own creator on top of the main
      # thread's creator, without seeing the other thread's creator.
      expected = ("main_thread:thread_0", "main_thread:thread_1")
      self.assertEqual(expected, result)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
  """Tests `call_for_each_replica` interactions with tf.function tracing."""

  def testExecutingEagerlyOutsideFunction(self, distribution):
    """Verify we preserve the value of executing_eagerly_outside_functions()."""
    def model_fn():
      return ops.executing_eagerly_outside_functions()

    originally = ops.executing_eagerly_outside_functions()
    with distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

    # Verify this all again, but this time in a FuncGraph.
    with func_graph.FuncGraph("fg").as_default(), distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)

  def testFunctionInCallForEachReplica(self, distribution):
    # A tf.function should be traced once per replica.
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(
          (0, 1),
          self.evaluate(distribution.experimental_local_results(result)))
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testFunctionInCallForEachReplicaInsideAnotherFunction(self, distribution):
    # Same as above, but with call_for_each_replica nested in an outer
    # tf.function.
    traces = []

    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group

    @def_function.function
    def step():
      return distribution.extended.call_for_each_replica(model_fn)

    with distribution.scope():
      result = step()
      self.assertEqual(
          (0, 1),
          self.evaluate(distribution.experimental_local_results(result)))
      self.assertLen(traces, distribution.num_replicas_in_sync)

  def testControlFlowFunctionInCallForEachReplicaWithMergeCall(
      self, distribution):
    # merge_call inside a control-flow body graph is unsupported and must
    # raise.

    def merge_fn(strategy, value):
      return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)

    @def_function.function
    def model_fn():

      def body_fn(i):
        return ds_context.get_replica_context().merge_call(merge_fn, args=(i,))

      return control_flow_ops.while_loop_v2(lambda i: i < 2, body_fn, [0])

    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)

  def testNestedFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    # merge_call inside a nested tf.function is likewise unsupported.

    def merge_fn(strategy, value):
      return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)

    def model_fn():

      @def_function.function
      def model_fn_nested():
        t = constant_op.constant(1)
        return ds_context.get_replica_context().merge_call(merge_fn, args=(t,))

      return model_fn_nested()

    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)

  def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    # merge_call directly inside the replica tf.function is supported.

    def merge_fn(_):
      pass

    @def_function.function
    def model_fn():
      ds_context.get_replica_context().merge_call(merge_fn)
      return 0.

    with distribution.scope():
      self.assertEqual(
          self.evaluate(distribution.extended.call_for_each_replica(model_fn)),
          0.)

  def testFunctionInCallForEachReplicaCached(self, distribution):
    # Repeated calls must reuse the traced ConcreteFunction, not retrace.
    traces = []

    @def_function.function
    def model_fn():
      traces.append(None)

    self.assertEmpty(traces)

    for i in range(10):
      distribution.extended.call_for_each_replica(model_fn)

      if i == 0:
        num_devices = len(traces)
        self.assertGreater(num_devices, 0)
      else:
        # model_fn should not have been re-evaluated so the length should remain
        # the same.
        self.assertLen(traces, num_devices)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
  """Checks how name scopes propagate to per-replica ops and variables."""
  # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
  # testing this in eager mode.

  def testNameScope(self, distribution):

    def model_fn():
      with ops.name_scope("foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(1.0, name="b")
      return a, b

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(2, len(result))
        for v, name in zip(result, ["a", "b"]):
          self.assertIsInstance(v, values.DistributedValues)
          v0, v1 = distribution.experimental_local_results(v)
          # Replica 0 keeps the plain scope; replica 1 gets a "replica_1"
          # segment inserted under the surrounding scope.
          self.assertEqual("main/foo/" + name + ":0", v0.name)
          self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)

  def testWithDefaultName(self, distribution):

    def model_fn():
      # name_scope(None, "foo") uses "foo" as the default scope name.
      with ops.name_scope(None, "foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(2.0, name="b")
      return a, b

    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(2, len(result))
      for v, name in zip(result, ["a", "b"]):
        self.assertIsInstance(v, values.DistributedValues)
        v0, v1 = distribution.experimental_local_results(v)
        self.assertEqual("foo/" + name + ":0", v0.name)
        self.assertEqual("replica_1/foo/" + name + ":0", v1.name)

  # variable_scope.variable() respects name scopes when creating
  # variables. On the other hand variable_scope.get_variable() ignores name
  # scopes but respects variable scope when creating variables. We test both
  # methods of creating variables to make sure that we have the same
  # variable names in both cases.
  def testNameScopeWithVariable(self, distribution):

    def in_cross_replica(_):
      c = variable_scope.variable(1.0, name="c")
      return c

    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # Mirrored copies of a variable share the name scope; the replica-1
      # copy is suffixed with "/replica_1".
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)

  def testNameScopeWithGetVariable(self, distribution):

    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # get_variable ignores the surrounding *name* scopes entirely.
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)

  def testVariableScopeWithGetVariable(self, distribution):

    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c

    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with variable_scope.variable_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c

    with context.graph_mode(), distribution.scope():
      with variable_scope.variable_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # Unlike name scopes, *variable* scopes are respected by get_variable.
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored3Devices",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
                required_gpus=2)
        ],
        mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """Smoke test for MirroredStrategy spanning two GPUs plus a CPU."""

  def testThreeDevices(self, distribution):

    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      # A variable created in replica context should come back mirrored
      # across all three devices, under the original name.
      self.assertTrue(distribute_utils.is_mirrored(result))
      self.assertEqual("foo:0", result.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
  """Covers assign/assign_add/assign_sub on mirrored variables."""
  # The following tests check assign, assign_add and assign_sub on Mirrored
  # variables in replica and cross replica context.

  def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
                                                                distribution):

    def var_fn():
      v = variable_scope.variable(1.0, name="foo")
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      # Assigning the same single value on every replica needs no
      # aggregation type to succeed.
      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSum(self, distribution):
    # Test that we don't reduce a non-per-replica value with the "sum"
    # aggregation type.
    def var_fn():
      v = variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      with self.assertRaisesRegex(
          ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
          "with the given reduce op ReduceOp.SUM."):
        self.evaluate(distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(model_fn)))

  def testAssignMirroredVarCrossDeviceContext(self, distribution):

    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))
      # In cross-replica context the assignment applies to all copies.
      mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
      self.assertEqual(6.0, mirrored_var_result)

  def testAssignMirroredVarReplicaContext(self, distribution):

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # Replica ids are 0 and 1; MEAN aggregation yields (0 + 1) / 2 = 0.5.
      self.assertEqual(0.5, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarCrossDeviceContext(self, distribution):

    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      # read_value == True
      mirrored_var_result = self.evaluate(
          mirrored_var.assign_add(6.0, read_value=True))
      self.assertEqual(7.0, mirrored_var_result)
      self.assertEqual(
          7.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          7.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

      # read_value == False
      self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
      self.assertEqual(
          9.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          9.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

  def testAssignAddMirroredVarReplicaContext(self, distribution):

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_add(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 1.0 + mean(0, 1) = 1.5.
      self.assertEqual(1.5, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):

    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_add(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(6.0, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarCrossDeviceContext(self, distribution):

    def var_fn():
      return variable_scope.variable(5.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
      self.assertEqual(3.0, mirrored_var_result)
      self.assertEqual(
          3.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          3.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

  def testAssignSubMirroredVarReplicaContext(self, distribution):

    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_sub(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      # 5.0 - mean(0, 1) = 4.5.
      self.assertEqual(4.5, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):

    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_sub(1.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
  """Checks that distributed variables expose a working initializer op."""

  def testAssignMirroredVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():

      def var_fn():
        v = variable_scope.variable(1.0, name="foo")
        return v

      with distribution.scope():
        mirrored_var = distribution.extended.call_for_each_replica(var_fn)
        self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
        self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
        self.evaluate(mirrored_var.initializer)
        self.assertTrue(self.evaluate(mirrored_var.is_initialized()))

  def testAssignReplicaLocalVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():

      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        self.assertTrue(distribute_utils.is_sync_on_read(v_sum))
        return v_sum

      with distribution.scope():
        sync_on_read_var = distribution.extended.call_for_each_replica(
            model_fn)
        self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
        self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
        self.evaluate(sync_on_read_var.initializer)
        self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
  """Checks reads/assigns of ON_READ variables under SUM/MEAN aggregation."""

  def testAssignReplicaLocalVarSumAggregation(self, distribution):

    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))

      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the SUM of each of
      # values on each of the replicas.
      self.assertEqual(2.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      # Assigning 6.0 in cross replica context will assign a value of
      # 6.0/num_replicas to each replica.
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the assigned value back.
      # The value on all the replicas are added before being returned by
      # `read_var`.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))

  def testAssignReplicaLocalVarMeanAggregation(self, distribution):

    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))

      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the MEAN of values
      # on all replicas which is the value assigned in replica context.
      self.assertEqual(1.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the MEAN of all values
      # which is equal to the value assigned.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
  """Tiny callable test model that owns one or two TF variables.

  Calling the instance returns ``factor * var1`` plus ``var2`` when the
  second variable exists.
  """

  def __init__(self, two_variables=False):
    first = variable_scope.variable(1.25, name="dummy_var1")
    self.variables = [first]
    if two_variables:
      second = variable_scope.variable(2.0, name="dummy_var2")
      self.variables.append(second)

  def __call__(self, factor=2):
    result = factor * self.variables[0]
    # Fold in any additional variables (at most one is ever created).
    for extra in self.variables[1:]:
      result += extra
    return result
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
  """Tests `function.defun`-decorated functions under MirroredStrategy."""

  def _call_and_check(self, distribution, model_fn, inputs, expected_result,
                      defuns, two_variables=False):
    """Runs model_fn per replica and checks each device's result/trace."""
    cpu_dev = device_util.canonicalize("CPU:0")
    gpu_dev = device_util.canonicalize("GPU:0")
    devices = [cpu_dev, gpu_dev]

    with distribution.scope():
      mock_model = MockModel(two_variables)
      self.evaluate(variables.global_variables_initializer())

      result = distribution.extended.call_for_each_replica(
          model_fn, args=[mock_model] + inputs)
      for r in range(len(devices)):
        device_result = distribute_utils.select_replica(r, result)
        device_expected_result = distribute_utils.select_replica(
            r, expected_result)
        self.assertAllClose(device_expected_result,
                            self.evaluate(device_result))

      for defun in defuns:
        # `Function`s are specialized to the current device stack, so
        # call_for_each has one trace per device. To check that the expected set
        # of variables was accessed on each trace, we first retrieve each
        # device-specific graph function.
        per_replica_graph_functions = (
            distribution.extended.call_for_each_replica(
                defun.get_concrete_function, args=[mock_model] + inputs))
        for i in range(len(devices)):
          graph_function = distribution.experimental_local_results(
              per_replica_graph_functions)[i]
          # TODO(b/129555712): re-enable an assertion here that the two sets of
          # variables are the same.
          # self.assertEqual(set(graph_function.graph.variables),
          #                  set(mock_model.variables))
          del graph_function

  def testVariableInDefun(self, distribution):

    @function.defun
    def times_two(mock_model):
      return mock_model()

    def model_fn(mock_model):
      return times_two(mock_model)

    # MockModel holds 1.25; factor 2 -> 2.5.
    self._call_and_check(distribution, model_fn, [], 2.5, [times_two])

  def testVariableInNestedDefun(self, distribution):

    @function.defun
    def times_two(mock_model):
      return mock_model()

    @function.defun
    def two_x_plus_one(mock_model):
      return times_two(mock_model) + 1

    def model_fn(mock_model):
      return two_x_plus_one(mock_model)

    self._call_and_check(distribution, model_fn, [], 3.5,
                         [times_two, two_x_plus_one])

  def testTwoVariablesInNestedDefun(self, distribution):

    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      return fn2(mock_model)

    # With two variables: 2 * 1.25 + 2.0 + 1 = 5.5.
    self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
                         two_variables=True)

  def testGradientTapeOverNestedDefuns(self, distribution):

    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      with backprop.GradientTape(persistent=True) as gtape:
        result = fn2(mock_model)
      grads = gtape.gradient(result,
                             [v._get() for v in mock_model.variables])
      return grads

    self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
                         two_variables=True)

  def testPassPerReplica(self, distribution):

    @function.defun
    def fn1(mock_model, factor):
      return mock_model(factor)

    # A different factor on each replica exercises per-replica arguments.
    factors = values.PerReplica((5.0, 3.0))
    expected_result = values.PerReplica((5.0 * 1.25, 3.0 * 1.25))
    self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=mirrored_strategy.all_local_devices(),
                    cross_device_ops=cross_device_ops_lib.ReductionToOneDevice(
                    ),
                ),
                required_gpus=1)
        ],
        mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """Tests MirroredStrategy configured for a two-worker cluster."""

  def _configure_distribution_strategy(self, distribution):
    # Two-worker cluster; each worker mirrors over its local devices.
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    distribution.configure(cluster_spec=cluster_spec)

  def test_num_replicas_in_sync(self, distribution):
    self._configure_distribution_strategy(distribution)
    # We calculate the total number of gpus across the workers(2) specified in
    # the cluster spec.
    self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)

  def testMinimizeLossGraph(self, distribution):
    self._configure_distribution_strategy(distribution)
    self._test_minimize_loss_graph(distribution, learning_rate=0.05)

  def testDeviceScope(self, distribution):
    """Test the device scope of multi-worker MirroredStrategy."""
    self._configure_distribution_strategy(distribution)
    with distribution.scope():
      a = constant_op.constant(1.)
      with ops.device("/cpu:0"):
        b = constant_op.constant(1.)
      self.assertEqual(a.device, "/job:worker/task:0")
      self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._configure_distribution_strategy(distribution)
    dataset_fn = lambda: dataset_ops.Dataset.range(100)
    num_gpus = context.num_gpus()
    num_workers = 2
    expected_values = [[i+j for j in range(num_gpus)] * num_workers
                       for i in range(0, 100, num_gpus)]

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          dataset_fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._configure_distribution_strategy(distribution)

    def fn():
      dataset = dataset_ops.Dataset.range(100)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next

    num_gpus = context.num_gpus()
    num_workers = 2
    expected_values = []
    for i in range(0, 100, num_gpus):
      expected_values.append([i+j for j in range(num_gpus)] * num_workers)

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess,
          test_reinitialize=False, ignore_order=True)

  def testUpdateConfigProto(self, distribution):
    distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})

    config_proto = config_pb2.ConfigProto()
    new_config = distribution.update_config_proto(config_proto)

    # Verify isolate_session_state
    self.assertTrue(new_config.isolate_session_state)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=["/job:worker/task:0/gpu:{}".format(
                        i) for i in range(context.num_gpus())]),
                required_gpus=1)
        ],
        mode=["graph"]))
class RemoteSingleWorkerMirroredStrategyGraph(
    multi_worker_test_base.SingleWorkerTestBaseGraph,
    strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):
  """Graph-mode tests for MirroredStrategy against one remote worker.

  The actual test bodies live in the shared base class; this subclass only
  wires in the strategy combination and the local GPU count.
  """

  def _get_num_gpus(self):
    return context.num_gpus()

  def testNumReplicasInSync(self, distribution):
    self._testNumReplicasInSync(distribution)

  def testMinimizeLoss(self, distribution):
    self._testMinimizeLoss(distribution)

  def testDeviceScope(self, distribution):
    self._testDeviceScope(distribution)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._testMakeInputFnIteratorWithDataset(distribution)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._testMakeInputFnIteratorWithCallable(distribution)
class MultiWorkerMirroredStrategyTestWithChief(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """Tests MirroredStrategy on an in-process cluster that includes a chief."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=2, num_ps=0, has_chief=True)
    cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]

  def _make_cross_device_ops(self):
    return cross_device_ops_lib.ReductionToOneDevice()

  def testMinimizeLossGraph(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategy(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategyWithOneNode(self):
    with context.graph_mode():
      cluster_spec = {}
      cluster_spec["chief"] = self._cluster_spec["chief"]
      tf_config = {"cluster": cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy()
        # With GPUs present NCCL all-reduce is inferred; otherwise fall back
        # to reduce-to-one-device.
        if context.num_gpus() > 0:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.NcclAllReduce)
        else:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.ReductionToOneDevice)
      self.skipTest("b/130551176, run the following once fixed.")
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testInitializeFromTFConfig(self):
    with context.graph_mode():
      tf_config = {"cluster": self._cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy(
            cross_device_ops=self._make_cross_device_ops())
        # 3 nodes (chief + 2 workers), each with max(num_gpus, 1) replicas.
        self.assertEqual(
            max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)

  def testSummaryForReplicaZeroOnly(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_summary_for_replica_zero_only(strategy)
class MirroredVariableStopGradientTest(test.TestCase, parameterized.TestCase):
  """Checks that a mirrored variable can be used as a stop_gradient target."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_one_gpu,
          ],
          mode=["graph"]))
  def testMirroredVariableAsStopGradient(self, distribution):
    with distribution.scope():
      inp = constant_op.constant(1.0)
      x = variables.Variable(1.0)
      y = inp*x
      # Stopping the gradient at x should make d(x)/d(y) undefined (None).
      grads = gradients.gradients(x, y, stop_gradients=x)
      self.assertIsNone(grads[0])
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["eager"]))
class FunctionTest(test.TestCase, parameterized.TestCase):
  """Eager-mode tests for tf.function interaction with MirroredStrategy."""

  def testBackwardFunctionDevicePlacement(self, distribution):
    with distribution.scope():
      w = variable_scope.variable([1.5], name="w")
      b = variable_scope.variable([0.5], name="b")

    @def_function.function
    def forward(x, w, b):
      return x * w + b

    x = array_ops.identity([1.0], name="x_useless")
    concrete_forward = forward.get_concrete_function(x, w._primary, b._primary)

    with distribution.scope():

      def replica_fn():
        with backprop.GradientTape() as t:
          x = array_ops.identity([1.0], name="x")
          loss = concrete_forward(x, w._get(), b._get()) - [1.0]
          return t.gradient(loss, [w, b])

      def step_fn():
        return distribution.run(replica_fn)

      context.enable_run_metadata()
      g1, g2 = step_fn()
      run_metadata = context.export_run_metadata()
      context.disable_run_metadata()

      self.assertEqual(self.evaluate(g1._primary), 1.0)
      self.assertEqual(self.evaluate(g2._primary), 1.0)

      # Verify that this node runs on both devices.
      node_name = "gradients_mul_grad_mul_1_x"
      devices_for_this_node = set()
      for partition_graph in run_metadata.partition_graphs:
        for node in partition_graph.node:
          if node.name == node_name:
            devices_for_this_node.add(node.device)
      devices = [device_util.resolve("/device:GPU:0"),
                 device_util.resolve("/device:CPU:0")]
      self.assertSetEqual(devices_for_this_node, set(devices))

  # NOTE(review): method name misspells "Function"; kept as-is because the
  # name is part of the public test interface (runner/filter discovery).
  def testFuctionPreservesAutoGraph(self, distribution):

    def f():
      self.assertTrue(converter_testing.is_inside_generated_code())
      return 1

    with distribution.scope():

      @def_function.function
      def replica_fn():
        return f()

      distribution.run(replica_fn)
def _replica_id():
  """Returns the current replica id wrapped in an identity tensor."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  if isinstance(rid, ops.Tensor):
    return array_ops.identity(rid)
  # Plain Python value: lift it to a constant tensor first.
  return array_ops.identity(constant_op.constant(rid))
def _replica_id_as_int():
  """Returns the current replica id as a concrete (non-tensor) value."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  # Tensors are resolved to their static value; other values pass through.
  return tensor_util.constant_value(rid) if isinstance(rid, ops.Tensor) else rid
# Standard TF test entry point: runs all test cases in this module.
if __name__ == "__main__":
  test.main()
| 38.943789 | 91 | 0.715528 |
acdfdc33722a4815726488f3c9de5638db117260 | 823 | py | Python | src/maintenance/config/youtube_scrapping.py | dragonee/maintenance | 2e922eecbf1fc0f9b9350595739e4176088612b6 | [
"MIT"
] | null | null | null | src/maintenance/config/youtube_scrapping.py | dragonee/maintenance | 2e922eecbf1fc0f9b9350595739e4176088612b6 | [
"MIT"
] | 15 | 2020-03-30T21:50:45.000Z | 2020-04-13T16:16:46.000Z | src/maintenance/config/youtube_scrapping.py | dragonee/maintenance | 2e922eecbf1fc0f9b9350595739e4176088612b6 | [
"MIT"
] | null | null | null |
from configparser import ConfigParser
from pathlib import Path
class YoutubeScrappingConfigFile:
    """Configuration for YouTube scraping, read from ``youtube.ini`` files.

    Looks for a system-wide ``/etc/youtube.ini`` and a per-user
    ``~/.youtube.ini`` (later files override earlier ones, per
    ``ConfigParser.read`` semantics) and exposes the ``cookies``,
    ``chrome_binary`` and ``chromedriver_binary`` settings as attributes.

    Raises:
        KeyError: if the required ``[Youtube]``/``[Selenium]`` sections or
            their keys are missing from every config file found.
    """

    # Class-level defaults; overwritten per instance by __init__.
    cookies = None
    chrome_binary = None
    chromedriver_binary = None

    def __init__(self):
        self.reader = ConfigParser()
        # read() silently skips files that do not exist.
        self.reader.read(self.paths())
        parser = self.reader
        try:
            self.cookies = parser['Youtube']['cookies']
            self.chromedriver_binary = parser['Selenium']['chromedriver_binary']
            self.chrome_binary = parser['Selenium']['chrome_binary']
        except KeyError:
            raise KeyError("Create ~/.youtube.ini file with section [Youtube] containing cookies, and [Selenium] containing chrome_binary and chromedriver_binary")

    def paths(self):
        """Candidate configuration files, lowest to highest precedence."""
        system_wide = '/etc/youtube.ini'
        per_user = Path.home() / '.youtube.ini'
        return [system_wide, per_user]
acdfdc7b8f6fc21223fe5381eef696c9ea063a44 | 37,880 | py | Python | dace/transformation/helpers.py | Berke-Ates/dace | 7683e8637d16c70295c4709cd752eb3cdc6918f9 | [
"BSD-3-Clause"
] | null | null | null | dace/transformation/helpers.py | Berke-Ates/dace | 7683e8637d16c70295c4709cd752eb3cdc6918f9 | [
"BSD-3-Clause"
] | null | null | null | dace/transformation/helpers.py | Berke-Ates/dace | 7683e8637d16c70295c4709cd752eb3cdc6918f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Transformation helper API. """
import copy
import itertools
from networkx import MultiDiGraph
from dace.subsets import Range, Subset, union
import dace.subsets as subsets
from typing import Dict, List, Optional, Tuple, Set, Union
from dace import data, dtypes, symbolic
from dace.sdfg import nodes, utils
from dace.sdfg.graph import SubgraphView, MultiConnectorEdge
from dace.sdfg.scope import ScopeSubgraphView, ScopeTree
from dace.sdfg import SDFG, SDFGState, InterstateEdge
from dace.sdfg import graph
from dace.memlet import Memlet
def nest_state_subgraph(sdfg: SDFG,
                        state: SDFGState,
                        subgraph: SubgraphView,
                        name: Optional[str] = None,
                        full_data: bool = False) -> nodes.NestedSDFG:
    """ Turns a state subgraph into a nested SDFG. Operates in-place.

        :param sdfg: The SDFG containing the state subgraph.
        :param state: The state containing the subgraph.
        :param subgraph: Subgraph to nest.
        :param name: An optional name for the nested SDFG.
        :param full_data: If True, nests entire input/output data.
        :return: The nested SDFG node.
        :raise KeyError: Some or all nodes in the subgraph are not located in
                         this state, or the state does not belong to the given
                         SDFG.
        :raise ValueError: The subgraph is contained in more than one scope.
    """
    if state.parent != sdfg:
        raise KeyError('State does not belong to given SDFG')
    if subgraph is not state and subgraph.graph is not state:
        raise KeyError('Subgraph does not belong to given state')

    # Find the top-level scope: the subgraph must be fully contained in a
    # single scope, and complete scopes (entry+exit+contents) must be whole.
    scope_tree = state.scope_tree()
    scope_dict = state.scope_dict()
    scope_dict_children = state.scope_children()
    top_scopenode = -1  # Initialized to -1 since "None" already means top-level

    for node in subgraph.nodes():
        if node not in scope_dict:
            raise KeyError('Node not found in state')

        # If scope entry/exit, ensure entire scope is in subgraph
        if isinstance(node, nodes.EntryNode):
            scope_nodes = scope_dict_children[node]
            if any(n not in subgraph.nodes() for n in scope_nodes):
                raise ValueError('Subgraph contains partial scopes (entry)')
        elif isinstance(node, nodes.ExitNode):
            entry = state.entry_node(node)
            scope_nodes = scope_dict_children[entry] + [entry]
            if any(n not in subgraph.nodes() for n in scope_nodes):
                raise ValueError('Subgraph contains partial scopes (exit)')

        scope_node = scope_dict[node]
        if scope_node not in subgraph.nodes():
            if top_scopenode != -1 and top_scopenode != scope_node:
                raise ValueError('Subgraph is contained in more than one scope')
            top_scopenode = scope_node

    scope = scope_tree[top_scopenode]
    ###

    # Consolidate edges in top scope
    utils.consolidate_edges(sdfg, scope)
    snodes = subgraph.nodes()

    # Collect inputs and outputs of the nested SDFG: edges crossing the
    # subgraph boundary in either direction.
    inputs: List[MultiConnectorEdge] = []
    outputs: List[MultiConnectorEdge] = []
    for node in snodes:
        for edge in state.in_edges(node):
            if edge.src not in snodes:
                inputs.append(edge)
        for edge in state.out_edges(node):
            if edge.dst not in snodes:
                outputs.append(edge)

    # Collect transients not used outside of subgraph (will be removed of
    # top-level graph)
    data_in_subgraph = set(n.data for n in subgraph.nodes() if isinstance(n, nodes.AccessNode))
    # Find other occurrences in SDFG
    other_nodes = set(n.data for s in sdfg.nodes() for n in s.nodes()
                      if isinstance(n, nodes.AccessNode) and n not in subgraph.nodes())
    subgraph_transients = set()
    # NOTE: the loop variable shadows the `data` module imported at file level;
    # harmless here since the module is not used inside this function.
    for data in data_in_subgraph:
        datadesc = sdfg.arrays[data]
        if datadesc.transient and data not in other_nodes:
            subgraph_transients.add(data)

    # All transients of edges between code nodes are also added to nested graph
    for edge in subgraph.edges():
        if (isinstance(edge.src, nodes.CodeNode) and isinstance(edge.dst, nodes.CodeNode)):
            subgraph_transients.add(edge.data.data)

    # Collect data used in access nodes within subgraph (will be referenced in
    # full upon nesting)
    input_arrays = set()
    output_arrays = {}
    for node in subgraph.nodes():
        if (isinstance(node, nodes.AccessNode) and node.data not in subgraph_transients):
            if node.has_reads(state):
                input_arrays.add(node.data)
            if node.has_writes(state):
                output_arrays[node.data] = state.in_edges(node)[0].data.wcr

    # Create the nested SDFG
    nsdfg = SDFG(name or 'nested_' + state.label)

    # Transients are added to the nested graph as-is
    for name in subgraph_transients:
        nsdfg.add_datadesc(name, sdfg.arrays[name])

    # Input/output data that are not source/sink nodes are added to the graph
    # as non-transients
    for name in (input_arrays | output_arrays.keys()):
        datadesc = copy.deepcopy(sdfg.arrays[name])
        datadesc.transient = False
        nsdfg.add_datadesc(name, datadesc)

    # Connected source/sink nodes outside subgraph become global data
    # descriptors in nested SDFG
    input_names = {}
    output_names = {}
    # Maps each outer array name to (name inside the nested SDFG, union of
    # all subsets accessed through the boundary) so the inner array can be
    # shrunk to just the accessed region when full_data is False.
    global_subsets: Dict[str, Tuple[str, Subset]] = {}
    for edge in inputs:
        if edge.data.data is None:  # Skip edges with an empty memlet
            continue
        name = edge.data.data
        if name not in global_subsets:
            datadesc = copy.deepcopy(sdfg.arrays[edge.data.data])
            datadesc.transient = False
            if not full_data:
                datadesc.shape = edge.data.subset.size()
            new_name = nsdfg.add_datadesc(name, datadesc, find_new_name=True)
            global_subsets[name] = (new_name, edge.data.subset)
        else:
            new_name, subset = global_subsets[name]
            if not full_data:
                new_subset = union(subset, edge.data.subset)
                if new_subset is None:
                    # Subsets cannot be combined; fall back to the full array
                    new_subset = Range.from_array(sdfg.arrays[name])
                global_subsets[name] = (new_name, new_subset)
                nsdfg.arrays[new_name].shape = new_subset.size()
        input_names[edge] = new_name
    for edge in outputs:
        if edge.data.data is None:  # Skip edges with an empty memlet
            continue
        name = edge.data.data
        if name not in global_subsets:
            datadesc = copy.deepcopy(sdfg.arrays[edge.data.data])
            datadesc.transient = False
            if not full_data:
                datadesc.shape = edge.data.subset.size()
            new_name = nsdfg.add_datadesc(name, datadesc, find_new_name=True)
            global_subsets[name] = (new_name, edge.data.subset)
        else:
            new_name, subset = global_subsets[name]
            if not full_data:
                new_subset = union(subset, edge.data.subset)
                if new_subset is None:
                    new_subset = Range.from_array(sdfg.arrays[name])
                global_subsets[name] = (new_name, new_subset)
                nsdfg.arrays[new_name].shape = new_subset.size()
        output_names[edge] = new_name
    ###################

    # Add scope symbols to the nested SDFG
    defined_vars = set(
        symbolic.pystr_to_symbolic(s) for s in (state.symbols_defined_at(top_scopenode).keys()
                                                | sdfg.symbols))
    for v in defined_vars:
        if v in sdfg.symbols:
            sym = sdfg.symbols[v]
            nsdfg.add_symbol(v, sym.dtype)

    # Add constants to nested SDFG
    for cstname, cstval in sdfg.constants.items():
        nsdfg.add_constant(cstname, cstval)

    # Create nested state
    nstate = nsdfg.add_state()

    # Add subgraph nodes and edges to nested state
    nstate.add_nodes_from(subgraph.nodes())
    for e in subgraph.edges():
        nstate.add_edge(e.src, e.src_conn, e.dst, e.dst_conn, copy.deepcopy(e.data))

    # Modify nested SDFG parents in subgraph
    for node in subgraph.nodes():
        if isinstance(node, nodes.NestedSDFG):
            node.sdfg.parent = nstate
            node.sdfg.parent_sdfg = nsdfg
            node.sdfg.parent_nsdfg_node = node

    # Add access nodes and edges as necessary
    edges_to_offset = []
    for edge, name in input_names.items():
        node = nstate.add_read(name)
        new_edge = copy.deepcopy(edge.data)
        new_edge.data = name
        edges_to_offset.append((edge, nstate.add_edge(node, None, edge.dst, edge.dst_conn, new_edge)))
    for edge, name in output_names.items():
        node = nstate.add_write(name)
        new_edge = copy.deepcopy(edge.data)
        new_edge.data = name
        edges_to_offset.append((edge, nstate.add_edge(edge.src, edge.src_conn, node, None, new_edge)))

    # Offset memlet paths inside nested SDFG according to subsets
    for original_edge, new_edge in edges_to_offset:
        for edge in nstate.memlet_tree(new_edge):
            edge.data.data = new_edge.data.data
            if not full_data:
                edge.data.subset.offset(global_subsets[original_edge.data.data][1], True)

    # Add nested SDFG node to the input state
    nested_sdfg = state.add_nested_sdfg(nsdfg, None,
                                        set(input_names.values()) | input_arrays,
                                        set(output_names.values()) | output_arrays.keys())

    # Reconnect memlets to nested SDFG
    reconnected_in = set()
    reconnected_out = set()
    empty_input = None
    empty_output = None
    for edge in inputs:
        if edge.data.data is None:
            empty_input = edge
            continue

        name = input_names[edge]
        if name in reconnected_in:
            continue
        if full_data:
            data = Memlet.from_array(edge.data.data, sdfg.arrays[edge.data.data])
        else:
            data = copy.deepcopy(edge.data)
            data.subset = global_subsets[edge.data.data][1]
        state.add_edge(edge.src, edge.src_conn, nested_sdfg, name, data)
        reconnected_in.add(name)

    for edge in outputs:
        if edge.data.data is None:
            empty_output = edge
            continue

        name = output_names[edge]
        if name in reconnected_out:
            continue
        if full_data:
            data = Memlet.from_array(edge.data.data, sdfg.arrays[edge.data.data])
        else:
            data = copy.deepcopy(edge.data)
            data.subset = global_subsets[edge.data.data][1]
        data.wcr = edge.data.wcr
        state.add_edge(nested_sdfg, name, edge.dst, edge.dst_conn, data)
        reconnected_out.add(name)

    # Connect access nodes to internal input/output data as necessary
    entry = scope.entry
    exit = scope.exit
    for name in input_arrays:
        node = state.add_read(name)
        if entry is not None:
            state.add_nedge(entry, node, Memlet())
        state.add_edge(node, None, nested_sdfg, name, Memlet.from_array(name, sdfg.arrays[name]))
    for name, wcr in output_arrays.items():
        node = state.add_write(name)
        if exit is not None:
            state.add_nedge(node, exit, Memlet())
        state.add_edge(nested_sdfg, name, node, None, Memlet(data=name, wcr=wcr))

    # Graph was not reconnected, but needs to be
    if state.in_degree(nested_sdfg) == 0 and empty_input is not None:
        state.add_edge(empty_input.src, empty_input.src_conn, nested_sdfg, None, empty_input.data)
    if state.out_degree(nested_sdfg) == 0 and empty_output is not None:
        state.add_edge(nested_sdfg, None, empty_output.dst, empty_output.dst_conn, empty_output.data)

    # Remove subgraph nodes from graph
    state.remove_nodes_from(subgraph.nodes())

    # Remove subgraph transients from top-level graph
    for transient in subgraph_transients:
        del sdfg.arrays[transient]

    # Remove newly isolated nodes due to memlet consolidation
    for edge in inputs:
        if state.in_degree(edge.src) + state.out_degree(edge.src) == 0:
            state.remove_node(edge.src)
    for edge in outputs:
        if state.in_degree(edge.dst) + state.out_degree(edge.dst) == 0:
            state.remove_node(edge.dst)

    return nested_sdfg
def state_fission(sdfg: SDFG, subgraph: graph.SubgraphView) -> SDFGState:
    '''
    Given a subgraph, adds a new SDFG state before the state that contains it,
    removes the subgraph from the original state, and connects the two states.

    :param sdfg: The SDFG containing the subgraph's state.
    :param subgraph: the subgraph to remove.
    :return: the newly created SDFG state.
    '''
    state: SDFGState = subgraph.graph
    newstate = sdfg.add_state_before(state)

    # Save edges before removing nodes
    orig_edges = subgraph.edges()

    # Mark boundary access nodes to keep after fission: nodes that also have
    # edges outside the subgraph must remain (as copies) in the original state.
    nodes_to_remove = set(subgraph.nodes())
    boundary_nodes = [n for n in subgraph.nodes() if len(state.out_edges(n)) > len(subgraph.out_edges(n))
                      ] + [n for n in subgraph.nodes() if len(state.in_edges(n)) > len(subgraph.in_edges(n))]

    # Make dictionary of nodes to add to new state; boundary nodes are moved
    # as deep copies so the originals can stay behind in `state`.
    new_nodes = {n: n for n in subgraph.nodes()}
    new_nodes.update({b: copy.deepcopy(b) for b in boundary_nodes})

    nodes_to_remove -= set(boundary_nodes)
    state.remove_nodes_from(nodes_to_remove)

    for n in new_nodes.values():
        if isinstance(n, nodes.NestedSDFG):
            # Set the new parent state
            n.sdfg.parent = newstate

    newstate.add_nodes_from(new_nodes.values())

    # Re-create the subgraph's edges between the (possibly copied) nodes.
    for e in orig_edges:
        newstate.add_edge(new_nodes[e.src], e.src_conn, new_nodes[e.dst], e.dst_conn, e.data)

    return newstate
def _get_internal_subset(internal_memlet: Memlet,
                         external_memlet: Memlet,
                         use_src_subset: bool = False,
                         use_dst_subset: bool = False) -> subsets.Subset:
    """Select the subset of ``internal_memlet`` relevant for unsqueezing.

    If the two memlets refer to different arrays and the internal memlet
    carries an ``other_subset``, that subset applies. Otherwise, the source
    or destination subset is returned when explicitly requested (at most one
    of the two flags may be set), defaulting to the plain ``subset``.
    """
    if internal_memlet.data != external_memlet.data:
        if internal_memlet.other_subset is not None:
            return internal_memlet.other_subset
    if use_src_subset and use_dst_subset:
        raise ValueError('Source and destination subsets cannot be ' 'specified at the same time')
    if use_src_subset:
        return internal_memlet.src_subset
    if use_dst_subset:
        return internal_memlet.dst_subset
    return internal_memlet.subset
def unsqueeze_memlet(internal_memlet: Memlet,
                     external_memlet: Memlet,
                     preserve_minima: bool = False,
                     use_src_subset: bool = False,
                     use_dst_subset: bool = False) -> Memlet:
    """ Unsqueezes and offsets a memlet, as per the semantics of nested
        SDFGs.

        :param internal_memlet: The internal memlet (inside nested SDFG)
                                before modification.
        :param external_memlet: The external memlet before modification.
        :param preserve_minima: Do not change the subset's minimum elements.
        :param use_src_subset: If both sides of the memlet refer to same array,
                               prefer source subset.
        :param use_dst_subset: If both sides of the memlet refer to same array,
                               prefer destination subset.
        :return: Offset Memlet to set on the resulting graph.
    """
    internal_subset = _get_internal_subset(internal_memlet, external_memlet, use_src_subset, use_dst_subset)
    result = copy.deepcopy(internal_memlet)
    result.data = external_memlet.data
    # Clear other_subset: the result refers only to the external array.
    result.other_subset = None
    result.subset = copy.deepcopy(internal_subset)

    shape = external_memlet.subset.size()
    if len(internal_subset) < len(external_memlet.subset):
        # Internal memlet has fewer dimensions: re-insert the size-1
        # dimensions that were squeezed away when nesting.
        ones = [i for i, d in enumerate(shape) if d == 1]

        # Special case: If internal memlet is one element and the top
        # memlet uses all its dimensions, ignore the internal element
        # TODO: There must be a better solution
        if (len(internal_subset) == 1 and ones == list(range(len(shape)))
                and (internal_subset[0] == (0, 0, 1) or internal_subset[0] == 0)):
            to_unsqueeze = ones[1:]
        else:
            to_unsqueeze = ones

        result.subset.unsqueeze(to_unsqueeze)
    elif len(internal_subset) > len(external_memlet.subset):
        # Try to squeeze internal memlet
        result.subset.squeeze()
        if len(result.subset) != len(external_memlet.subset):
            raise ValueError('Unexpected extra dimensions in internal memlet '
                             'while un-squeezing memlet.\nExternal memlet: %s\n'
                             'Internal memlet: %s' % (external_memlet, internal_memlet))

    # Translate the (nested-local) subset into the external array's frame.
    result.subset.offset(external_memlet.subset, False)

    if preserve_minima:
        if len(result.subset) != len(external_memlet.subset):
            raise ValueError('Memlet specifies reshape that cannot be un-squeezed.\n'
                             'External memlet: %s\nInternal memlet: %s' % (external_memlet, internal_memlet))
        # Keep the external subset's minimum per dimension, only adopting
        # the (possibly offset) upper bound and stride from the result.
        original_minima = external_memlet.subset.min_element()
        for i in set(range(len(original_minima))):
            rb, re, rs = result.subset.ranges[i]
            result.subset.ranges[i] = (original_minima[i], re, rs)

    # TODO: Offset rest of memlet according to other_subset
    if external_memlet.other_subset is not None:
        raise NotImplementedError

    return result
def replicate_scope(sdfg: SDFG, state: SDFGState, scope: ScopeSubgraphView) -> ScopeSubgraphView:
    """
    Replicates a scope subgraph view within a state, reconnecting all external
    edges to the same nodes.

    :param sdfg: The SDFG in which the subgraph scope resides.
    :param state: The SDFG state in which the subgraph scope resides.
    :param scope: The scope subgraph to replicate.
    :return: A reconnected replica of the scope.
    """
    exit_node = state.exit_node(scope.entry)

    # Replicate internal graph
    new_nodes = []
    new_entry = None
    new_exit = None
    # Scope-lifetime transients must get fresh array names in the copy so the
    # two scope instances do not alias each other's temporaries.
    to_find_new_names: Set[nodes.AccessNode] = set()
    for node in scope.nodes():
        node_copy = copy.deepcopy(node)
        if node == scope.entry:
            new_entry = node_copy
        elif node == exit_node:
            new_exit = node_copy
        if (isinstance(node, nodes.AccessNode) and node.desc(sdfg).lifetime == dtypes.AllocationLifetime.Scope
                and node.desc(sdfg).transient):
            to_find_new_names.add(node_copy)
        state.add_node(node_copy)
        new_nodes.append(node_copy)

    # Internal edges are mapped through the positional correspondence between
    # scope.nodes() and new_nodes (same index = same node).
    for edge in scope.edges():
        src = scope.nodes().index(edge.src)
        dst = scope.nodes().index(edge.dst)
        state.add_edge(new_nodes[src], edge.src_conn, new_nodes[dst], edge.dst_conn, copy.deepcopy(edge.data))

    # Reconnect external scope nodes
    for edge in state.in_edges(scope.entry):
        state.add_edge(edge.src, edge.src_conn, new_entry, edge.dst_conn, copy.deepcopy(edge.data))
    for edge in state.out_edges(exit_node):
        state.add_edge(new_exit, edge.src_conn, edge.dst, edge.dst_conn, copy.deepcopy(edge.data))

    # Set the exit node's map to match the entry node
    new_exit.map = new_entry.map

    # Replicate all temporary transients within scope
    for node in to_find_new_names:
        desc = node.desc(sdfg)
        new_name = sdfg.add_datadesc(node.data, copy.deepcopy(desc), find_new_name=True)
        node.data = new_name
        # Propagate the rename through every memlet touching the node.
        for edge in state.all_edges(node):
            for e in state.memlet_tree(edge):
                e.data.data = new_name

    return ScopeSubgraphView(state, new_nodes, new_entry)
def offset_map(sdfg: SDFG,
               state: SDFGState,
               entry: nodes.MapEntry,
               dim: int,
               offset: symbolic.SymbolicType,
               negative: bool = True):
    """
    Offsets a map parameter and its contents by a value.

    The map range of dimension ``dim`` is shifted by the offset, and every
    use of the map parameter inside the scope is replaced by a compensating
    expression, so overall semantics are unchanged.

    :param sdfg: The SDFG in which the map resides.
    :param state: The state in which the map resides.
    :param entry: The map entry node.
    :param dim: The map dimension to offset.
    :param offset: The value to offset by.
    :param negative: If True, offsets by ``-offset``.
    """
    # Shift the iteration range itself.
    entry.map.range.offset(offset, negative, indices=[dim])
    # Compensate inside the scope body with the opposite shift.
    symbol = entry.map.params[dim]
    sign = '+' if negative else '-'
    state.scope_subgraph(entry).replace(symbol, f'({symbol} {sign} {offset})')
def split_interstate_edges(sdfg: SDFG) -> None:
    """
    Splits all inter-state edges into edges with conditions and edges with
    assignments. This procedure helps in nested loop detection.

    :param sdfg: The SDFG to split
    :note: Operates in-place on the SDFG.
    """
    for edge in sdfg.edges():
        # Only edges that carry both a condition and assignments are split.
        if not edge.data.assignments or edge.data.is_unconditional():
            continue
        intermediate = sdfg.add_state()
        sdfg.add_edge(edge.src, intermediate, InterstateEdge(condition=edge.data.condition))
        sdfg.add_edge(intermediate, edge.dst, InterstateEdge(assignments=edge.data.assignments))
        sdfg.remove_edge(edge)
def is_symbol_unused(sdfg: SDFG, sym: str) -> bool:
    """
    Checks for uses of symbol in an SDFG, and if there are none returns True.

    :param sdfg: The SDFG to search.
    :param sym: The symbol to test.
    :return: True if the symbol can be removed, False otherwise.
    """
    # Used by any data descriptor?
    for desc in sdfg.arrays.values():
        if any(sym == str(fs) for fs in desc.free_symbols):
            return False
    # Used by any state?
    if any(sym in state.free_symbols for state in sdfg.nodes()):
        return False
    # Used by any inter-state edge?
    if any(sym in edge.data.free_symbols for edge in sdfg.edges()):
        return False

    # Not found, symbol can be removed
    return True
def are_subsets_contiguous(subset_a: subsets.Subset, subset_b: subsets.Subset, dim: int = None) -> bool:
    """
    Returns True if two subsets are contiguous (or overlapping) and could thus
    be merged into a single subset.

    :param subset_a: The first subset.
    :param subset_b: The second subset.
    :param dim: If given, only check contiguity in this dimension; all other
                dimensions must then match exactly.
    """
    if dim is not None:
        # A version that only checks for contiguity in certain
        # dimension (e.g., to prioritize stride-1 range)
        if (not isinstance(subset_a, subsets.Range) or not isinstance(subset_b, subsets.Range)):
            raise NotImplementedError('Contiguous subset check only ' 'implemented for ranges')

        # Other dimensions must be equal
        for i, (s1, s2) in enumerate(zip(subset_a.ranges, subset_b.ranges)):
            if i == dim:
                continue
            if s1[0] != s2[0] or s1[1] != s2[1] or s1[2] != s2[2]:
                return False

        # Set of conditions for contiguous dimension: either range ends
        # exactly where the other begins, or the two overlap.
        ab = (subset_a[dim][1] + 1) == subset_b[dim][0]
        a_overlap_b = subset_a[dim][1] >= subset_b[dim][0]
        ba = (subset_b[dim][1] + 1) == subset_a[dim][0]
        b_overlap_a = subset_b[dim][1] >= subset_a[dim][0]
        # NOTE: Must check with "==" due to sympy using special types
        return (ab == True or a_overlap_b == True or ba == True or b_overlap_a == True)

    # General case: the subsets are contiguous iff their bounding box covers
    # exactly as many elements as the two subsets combined.
    bbunion = subsets.bounding_box_union(subset_a, subset_b)
    try:
        if bbunion.num_elements() == (subset_a.num_elements() + subset_b.num_elements()):
            return True
    except TypeError:
        # Symbolic sizes may not be comparable; treat as non-contiguous.
        pass

    return False
def find_contiguous_subsets(subset_list: List[subsets.Subset], dim: int = None) -> Set[subsets.Subset]:
    """
    Finds the set of largest contiguous subsets in a list of subsets.

    :param subset_list: Iterable of subset objects.
    :param dim: Check for contiguity only for the specified dimension.
    :return: A set of the largest contiguous subsets.
    """
    # Currently O(n^3) worst case. TODO: improve
    subset_set = set(subsets.Range.from_indices(s) if isinstance(s, subsets.Indices) else s for s in subset_list)
    # Fixpoint loop: repeatedly merge/drop one pair per pass until no pair of
    # subsets covers or neighbors another.
    while True:
        for sa, sb in itertools.product(subset_set, subset_set):
            if sa is sb:
                continue
            if sa.covers(sb):
                subset_set.remove(sb)
                break
            elif sb.covers(sa):
                subset_set.remove(sa)
                break
            elif are_subsets_contiguous(sa, sb, dim):
                # Replace the pair by their bounding box.
                subset_set.remove(sa)
                subset_set.remove(sb)
                subset_set.add(subsets.bounding_box_union(sa, sb))
                break
        else:  # No modification performed
            break
    return subset_set
def constant_symbols(sdfg: SDFG) -> Set[str]:
    """
    Returns a set of symbols that will never change values throughout the course
    of the given SDFG. Specifically, these are the input symbols (i.e., not
    defined in a particular scope) that are never set by interstate edges.

    :param sdfg: The input SDFG.
    :return: A set of symbol names that remain constant throughout the SDFG.
    """
    # Every symbol assigned on any inter-state edge is mutable.
    assigned = set()
    for edge in sdfg.edges():
        assigned.update(edge.data.assignments.keys())
    return set(sdfg.symbols) - assigned
def simplify_state(state: SDFGState, remove_views: bool = False) -> MultiDiGraph:
    """
    Returns a networkx MultiDiGraph object that contains all the access nodes
    and corresponding edges of an SDFG state. The removed code nodes and map
    scopes are replaced by edges that connect their ancestor and succesor access
    nodes.

    :param state: The input SDFG state.
    :param remove_views: If True, also collapse access nodes that refer to
                         View data descriptors.
    :return: The MultiDiGraph object.
    """
    sdfg = state.parent

    # Copy the whole state
    G = MultiDiGraph()
    for n in state.nodes():
        G.add_node(n)
    for n in state.nodes():
        for e in state.all_edges(n):
            G.add_edge(e.src, e.dst)

    # Collapse all mappings and their scopes into one node: the entry node
    # inherits the exit node's outgoing connections and absorbs its contents.
    scope_children = state.scope_children()
    for n in scope_children[None]:
        if isinstance(n, nodes.EntryNode):
            G.add_edges_from([(n, x) for (y, x) in G.out_edges(state.exit_node(n))])
            G.remove_nodes_from(scope_children[n])

    # Remove all nodes that are not AccessNodes or have incoming
    # wcr edges and connect their predecessors and successors
    for n in state.nodes():
        if n in G.nodes():
            if (not isinstance(n, nodes.AccessNode) or (remove_views and isinstance(sdfg.arrays[n.data], data.View))):
                # Bypass the node: each predecessor links to each successor.
                for p in G.predecessors(n):
                    for c in G.successors(n):
                        G.add_edge(p, c)
                G.remove_node(n)
            else:
                for e in state.all_edges(n):
                    if e.data.wcr is not None:
                        # Write-conflict-resolution node: bypass and drop it.
                        for p in G.predecessors(n):
                            for s in G.successors(n):
                                G.add_edge(p, s)
                        G.remove_node(n)
                        break

    return G
def tile(sdfg: SDFG, map_entry: nodes.MapEntry, divides_evenly: bool, skew: bool, **tile_sizes: symbolic.SymbolicType):
    """
    Helper function that tiles a Map scope by the given sizes, in the
    given order.

    :param sdfg: The SDFG where the map resides.
    :param map_entry: The map entry node to tile.
    :param divides_evenly: If True, skips pre/postamble for cases
                           where the map dimension is not a multiplier
                           of the tile size.
    :param skew: If True, skews the tiled map to start from zero. Helps
                 compilers improve performance in certain cases.
    :param tile_sizes: An ordered dictionary of the map parameter names
                       to tile and their respective tile size (which can be
                       symbolic expressions).
    """
    # Avoid import loop
    from dace.transformation.dataflow import StripMining

    # Apply one strip-mining pass per tiled parameter, in keyword order.
    for param, size in tile_sizes.items():
        options = dict(dim_idx=map_entry.params.index(param),
                       tile_size=str(size),
                       divides_evenly=divides_evenly,
                       skew=skew)
        StripMining.apply_to(sdfg, options, map_entry=map_entry)
def permute_map(map_entry: nodes.MapEntry, perm: List[int]):
    """ Permutes indices of a map according to a given list of integers. """
    target_map = map_entry.map
    target_map.params = [target_map.params[i] for i in perm]
    target_map.range = [target_map.range[i] for i in perm]
def extract_map_dims(sdfg: SDFG, map_entry: nodes.MapEntry, dims: List[int]) -> Tuple[nodes.MapEntry, nodes.MapEntry]:
    """
    Helper function that extracts specific map dimensions into an outer map.

    :param sdfg: The SDFG where the map resides.
    :param map_entry: Map entry node to extract.
    :param dims: A list of dimension indices to extract.
    :return: A 2-tuple containing the extracted map and the remainder map.
    """
    # Avoid import loop
    from dace.transformation.dataflow import MapCollapse, MapExpansion

    # Make extracted dimensions first
    permute_map(map_entry, dims + [i for i in range(len(map_entry.map.params)) if i not in dims])
    # Expand map into one single-dimensional map per parameter
    if len(map_entry.map.params) > 1:
        entries = MapExpansion.apply_to(sdfg, map_entry=map_entry)

        # Collapse extracted maps back into one outer map
        extracted_map = entries[0]
        for idx in range(len(dims) - 1):
            extracted_map, _ = MapCollapse.apply_to(
                sdfg,
                outer_map_entry=extracted_map,
                inner_map_entry=entries[idx + 1],
                permissive=True,  # Since MapExpansion creates sequential maps
            )

        # Collapse remaining maps into the inner (remainder) map
        map_to_collapse = entries[len(dims)]
        for idx in range(len(dims), len(entries) - 1):
            map_to_collapse, _ = MapCollapse.apply_to(
                sdfg,
                outer_map_entry=map_to_collapse,
                inner_map_entry=entries[idx + 1],
                permissive=True,  # Since MapExpansion creates sequential maps
            )
    else:
        # Single-dimensional map: nothing to expand or collapse.
        extracted_map = map_entry
        map_to_collapse = map_entry

    return extracted_map, map_to_collapse
def scope_tree_recursive(state: SDFGState, entry: Optional[nodes.EntryNode] = None) -> ScopeTree:
    """
    Returns a scope tree that includes scopes from nested SDFGs.

    :param state: The state that contains the root of the scope tree.
    :param entry: A scope entry node to set as root, otherwise the state is
                  the root if None is given.
    """
    stree = state.scope_tree()[entry]
    stree.state = state  # Annotate state in tree

    # Add nested SDFGs as children
    def traverse(state: SDFGState, treenode: ScopeTree):
        snodes = state.scope_children()[treenode.entry]
        for node in snodes:
            if isinstance(node, nodes.NestedSDFG):
                # Each state of the nested SDFG becomes a child scope,
                # annotated with its owning state.
                for nstate in node.sdfg.nodes():
                    ntree = nstate.scope_tree()[None]
                    ntree.state = nstate
                    treenode.children.append(ntree)
        for child in treenode.children:
            # Children from nested SDFGs carry their own 'state' attribute;
            # regular children inherit the current state.
            traverse(getattr(child, 'state', state), child)

    traverse(state, stree)

    return stree
def get_internal_scopes(state: SDFGState,
                        entry: nodes.EntryNode,
                        immediate: bool = False) -> List[Tuple[SDFGState, nodes.EntryNode]]:
    """
    Returns all internal scopes within a given scope, including if they
    reside in nested SDFGs.

    :param state: State in which entry node resides.
    :param entry: The entry node to start from.
    :param immediate: If True, only returns the scopes that are immediately
                      nested in the map.
    """
    result: List[Tuple[SDFGState, nodes.EntryNode]] = []

    def _collect(cur_state: SDFGState, tnode: ScopeTree):
        for child in tnode.children:
            if child.entry is None:
                # Nested SDFG state: descend into it using its own state.
                _collect(child.state, child)
            else:
                result.append((cur_state, child.entry))
                if not immediate:
                    _collect(cur_state, child)

    _collect(state, scope_tree_recursive(state, entry))
    return result
def gpu_map_has_explicit_threadblocks(state: SDFGState, entry: nodes.EntryNode) -> bool:
    """
    Returns True if GPU_Device map has explicit thread-block maps nested within.
    """
    # Any nested map scheduled as a thread-block map (static or dynamic)?
    tb_schedules = (dtypes.ScheduleType.GPU_ThreadBlock, dtypes.ScheduleType.GPU_ThreadBlock_Dynamic)
    for _, inner in get_internal_scopes(state, entry):
        if inner.schedule in tb_schedules:
            return True
    # An immediately nested Default-scheduled map also counts.
    for _, inner in get_internal_scopes(state, entry, immediate=True):
        if inner.schedule == dtypes.ScheduleType.Default:
            return True
    return False
def reconnect_edge_through_map(
        state: SDFGState, edge: graph.MultiConnectorEdge[Memlet], new_node: Union[nodes.EntryNode, nodes.ExitNode],
        keep_src: bool) -> Tuple[graph.MultiConnectorEdge[Memlet], graph.MultiConnectorEdge[Memlet]]:
    """
    Reconnects an edge through a map scope, removes old edge, and returns the
    two new edges.

    :param state: The state in which the edge and map reside.
    :param edge: The edge to reconnect and remove.
    :param new_node: The scope (map) entry or exit to reconnect through.
    :param keep_src: If True, keeps the source of the edge intact, otherwise
                     keeps destination of edge.
    :return: A 2-tuple of (incoming edge, outgoing edge).
    """
    # Pick which endpoint stays fixed and which one the new scope node
    # connects to, along with the matching connectors.
    if keep_src:
        far_node, kept_node = edge.dst, edge.src
        internal_conn, external_conn = edge.dst_conn, edge.src_conn
    else:
        far_node, kept_node = edge.src, edge.dst
        internal_conn, external_conn = edge.src_conn, edge.dst_conn

    result = state.add_edge_pair(new_node,
                                 far_node,
                                 kept_node,
                                 edge.data,
                                 internal_connector=internal_conn,
                                 external_connector=external_conn)
    state.remove_edge(edge)
    return result
def contained_in(state: SDFGState, node: nodes.Node, scope: nodes.EntryNode) -> bool:
    """
    Returns true if the specified node is contained within the scope opened
    by the given entry node (including through nested SDFGs).
    """
    # A node is contained within itself
    if node is scope:
        return True
    current_sdfg = state.parent
    current_state = state
    current_scope = state.entry_node(node)
    while current_sdfg is not None:
        # Walk up the scope chain within the current state.
        while current_scope is not None:
            if current_scope is scope:
                return True
            current_scope = current_state.entry_node(current_scope)
        # Continue the walk from the nested-SDFG node in the parent SDFG.
        current_state = current_sdfg.parent
        current_scope = current_sdfg.parent_nsdfg_node
        current_sdfg = current_sdfg.parent_sdfg
    return False
def get_parent_map(state: SDFGState, node: Optional[nodes.Node] = None) -> Optional[Tuple[nodes.EntryNode, SDFGState]]:
    """
    Returns the map in which the state (and node) are contained in, or None if
    it is free.

    :param state: The state to test or parent of the node to test.
    :param node: The node to test (optional).
    :return: A tuple of (entry node, state) or None.
    """
    parent_sdfg = state.parent
    parent_state = state
    candidate = node
    while parent_sdfg is not None:
        # Look for an enclosing scope entry in the current state.
        if candidate is not None:
            candidate = parent_state.entry_node(candidate)
            if candidate is not None:
                return candidate, parent_state
        # Otherwise ascend through the nested-SDFG hierarchy.
        parent_state = parent_sdfg.parent
        candidate = parent_sdfg.parent_nsdfg_node
        parent_sdfg = parent_sdfg.parent_sdfg
    return None
def redirect_edge(state: SDFGState,
                  edge: graph.MultiConnectorEdge[Memlet],
                  new_src: Optional[nodes.Node] = None,
                  new_dst: Optional[nodes.Node] = None,
                  new_src_conn: Optional[str] = None,
                  new_dst_conn: Optional[str] = None,
                  new_data: Optional[str] = None,
                  new_memlet: Optional[Memlet] = None) -> graph.MultiConnectorEdge[Memlet]:
    """
    Redirects an edge in a state. Choose which elements to override by setting
    the keyword arguments.

    :param state: The SDFG state in which the edge resides.
    :param edge: The edge to redirect.
    :param new_src: If provided, redirects the source of the new edge.
    :param new_dst: If provided, redirects the destination of the new edge.
    :param new_src_conn: If provided, renames the source connector of the edge.
    :param new_dst_conn: If provided, renames the destination connector of the
                         edge.
    :param new_data: If provided, changes the data on the memlet of the edge,
                     and the entire associated memlet tree.
    :param new_memlet: If provided, changes only the memlet of the new edge.
    :return: The new, redirected edge.
    :note: ``new_data`` and ``new_memlet`` cannot be used at the same time.
    :note: Because overrides are combined with ``or``, an existing connector
           cannot be cleared to ``None`` through this function (passing
           ``None`` means "keep the original") — presumably intentional;
           verify against callers before relying on it.
    """
    if new_data is not None and new_memlet is not None:
        raise ValueError('new_data and new_memlet cannot both be given.')

    # Collect the memlet tree before removal, while the edge is still present.
    mtree = None
    if new_data is not None:
        mtree = state.memlet_tree(edge)

    state.remove_edge(edge)
    if new_data is not None:
        memlet = copy.deepcopy(edge.data)
        memlet.data = new_data

        # Rename on full memlet tree
        for e in mtree:
            e.data.data = new_data
    else:
        memlet = new_memlet or edge.data

    new_edge = state.add_edge(new_src or edge.src, new_src_conn or edge.src_conn, new_dst or edge.dst, new_dst_conn
                              or edge.dst_conn, memlet)
    return new_edge
| 41.084599 | 119 | 0.632524 |
acdfdc964b107a685816917cedfa8990e6d036ab | 1,150 | py | Python | neuralink/parameters.py | couyang24/DeepLearning | 686426e9f3c0e205f9418d7145733610cac5ee19 | [
"Apache-2.0"
] | 2 | 2022-03-21T01:46:01.000Z | 2022-03-22T14:01:14.000Z | neuralink/parameters.py | couyang24/DeepLearning | 686426e9f3c0e205f9418d7145733610cac5ee19 | [
"Apache-2.0"
] | 4 | 2022-03-17T13:02:00.000Z | 2022-03-23T16:14:06.000Z | neuralink/parameters.py | couyang24/DeepLearning | 686426e9f3c0e205f9418d7145733610cac5ee19 | [
"Apache-2.0"
] | null | null | null | """Module contains update parameters functions"""
import copy
from neuralink.base import Baseparams
class Parameters(Baseparams):
    """Gradient-descent parameter updates for an L-layer neural network."""

    def update(self, params, grads, learning_rate=1.2):
        """
        Apply one step of gradient descent to every layer's parameters.

        Arguments:
        params -- python dictionary with parameters "W1", "b1", ..., "WL", "bL"
        grads -- python dictionary with gradients "dW1", "db1", ..., "dWL",
                 "dbL", output of L_model_backward
        learning_rate -- gradient-descent step size (default 1.2)

        Returns:
        parameters -- python dictionary containing the updated parameters
                      parameters["W" + str(l)] = ...
                      parameters["b" + str(l)] = ...
        """
        # Shallow copy so the caller's dict keeps its original entries; the
        # per-layer values are rebound (not mutated in place) below.
        parameters = params.copy()
        L = len(parameters) // 2  # each layer contributes one "W" and one "b"

        # Layers are keyed 1..L, so iterate 1-based instead of offsetting by 1.
        for l in range(1, L + 1):
            parameters["W" + str(l)] = (
                parameters["W" + str(l)] - learning_rate * grads["dW" + str(l)]
            )
            parameters["b" + str(l)] = (
                parameters["b" + str(l)] - learning_rate * grads["db" + str(l)]
            )

        return parameters
| 33.823529 | 88 | 0.56087 |
acdfde39c2e5b94e1df7136e3afd6f9165356441 | 37,678 | py | Python | instances/passenger_demand/pas-20210421-2109-int1/72.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int1/72.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int1/72.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 2250
passenger_arriving = (
(2, 4, 3, 2, 1, 0, 9, 9, 6, 2, 1, 0), # 0
(2, 5, 1, 0, 2, 0, 2, 3, 3, 2, 1, 0), # 1
(4, 3, 5, 2, 2, 0, 6, 5, 5, 2, 1, 0), # 2
(1, 3, 8, 3, 0, 0, 8, 6, 0, 3, 3, 0), # 3
(4, 6, 0, 2, 2, 0, 2, 4, 0, 1, 1, 0), # 4
(3, 9, 4, 3, 1, 0, 3, 7, 3, 3, 1, 0), # 5
(4, 4, 7, 2, 1, 0, 5, 7, 5, 4, 0, 0), # 6
(2, 4, 1, 1, 0, 0, 5, 4, 3, 5, 0, 0), # 7
(3, 7, 3, 4, 1, 0, 5, 3, 2, 4, 0, 0), # 8
(2, 6, 2, 1, 2, 0, 8, 6, 2, 1, 1, 0), # 9
(2, 6, 3, 1, 2, 0, 4, 7, 7, 3, 3, 0), # 10
(1, 11, 2, 3, 2, 0, 2, 4, 5, 3, 1, 0), # 11
(4, 7, 3, 1, 2, 0, 1, 5, 3, 5, 2, 0), # 12
(3, 7, 8, 1, 3, 0, 6, 6, 2, 4, 4, 0), # 13
(3, 3, 2, 2, 1, 0, 5, 3, 8, 7, 0, 0), # 14
(4, 7, 3, 2, 1, 0, 7, 10, 5, 6, 1, 0), # 15
(0, 9, 6, 6, 2, 0, 9, 9, 4, 10, 0, 0), # 16
(4, 4, 3, 3, 1, 0, 7, 6, 4, 6, 1, 0), # 17
(8, 6, 3, 2, 1, 0, 4, 5, 4, 5, 3, 0), # 18
(1, 5, 5, 2, 2, 0, 1, 5, 2, 3, 3, 0), # 19
(4, 8, 7, 2, 3, 0, 3, 8, 4, 7, 2, 0), # 20
(4, 6, 5, 0, 1, 0, 2, 6, 5, 6, 1, 0), # 21
(3, 7, 6, 2, 0, 0, 3, 4, 4, 1, 4, 0), # 22
(5, 11, 5, 4, 1, 0, 3, 10, 2, 5, 1, 0), # 23
(4, 5, 8, 5, 3, 0, 2, 11, 3, 4, 0, 0), # 24
(5, 7, 1, 4, 1, 0, 3, 7, 1, 4, 2, 0), # 25
(4, 5, 6, 4, 2, 0, 4, 8, 4, 1, 1, 0), # 26
(1, 5, 8, 3, 3, 0, 8, 2, 4, 1, 1, 0), # 27
(2, 5, 3, 2, 1, 0, 4, 9, 0, 4, 2, 0), # 28
(5, 6, 6, 6, 4, 0, 10, 8, 4, 2, 2, 0), # 29
(1, 4, 3, 4, 0, 0, 4, 2, 4, 6, 1, 0), # 30
(2, 6, 3, 2, 4, 0, 5, 5, 9, 1, 4, 0), # 31
(1, 10, 4, 0, 1, 0, 3, 7, 5, 8, 2, 0), # 32
(4, 5, 3, 3, 1, 0, 9, 10, 4, 2, 2, 0), # 33
(5, 5, 5, 1, 4, 0, 8, 7, 1, 0, 3, 0), # 34
(3, 7, 3, 4, 0, 0, 3, 2, 5, 4, 2, 0), # 35
(5, 9, 4, 5, 1, 0, 7, 7, 3, 1, 0, 0), # 36
(2, 7, 6, 2, 3, 0, 5, 10, 3, 2, 1, 0), # 37
(2, 8, 4, 2, 2, 0, 7, 10, 3, 2, 3, 0), # 38
(2, 7, 4, 2, 4, 0, 5, 9, 8, 0, 3, 0), # 39
(3, 6, 4, 2, 2, 0, 1, 11, 8, 3, 3, 0), # 40
(2, 11, 7, 3, 4, 0, 3, 8, 1, 2, 3, 0), # 41
(6, 9, 2, 3, 1, 0, 7, 6, 5, 7, 3, 0), # 42
(6, 2, 3, 4, 0, 0, 5, 5, 5, 8, 0, 0), # 43
(1, 5, 4, 4, 1, 0, 5, 7, 5, 1, 0, 0), # 44
(2, 5, 6, 1, 3, 0, 4, 1, 5, 2, 2, 0), # 45
(6, 6, 5, 5, 3, 0, 4, 7, 3, 4, 0, 0), # 46
(1, 3, 5, 5, 1, 0, 7, 10, 7, 1, 1, 0), # 47
(1, 5, 2, 1, 2, 0, 2, 6, 5, 3, 0, 0), # 48
(3, 6, 6, 4, 0, 0, 2, 2, 3, 6, 0, 0), # 49
(5, 6, 4, 2, 1, 0, 4, 11, 3, 4, 1, 0), # 50
(4, 8, 5, 4, 1, 0, 7, 6, 3, 2, 2, 0), # 51
(4, 13, 2, 1, 2, 0, 6, 10, 4, 4, 0, 0), # 52
(5, 13, 5, 2, 4, 0, 2, 3, 4, 3, 2, 0), # 53
(4, 7, 3, 1, 1, 0, 2, 3, 2, 2, 1, 0), # 54
(6, 12, 4, 4, 2, 0, 2, 6, 4, 2, 1, 0), # 55
(3, 10, 3, 0, 4, 0, 1, 4, 7, 2, 1, 0), # 56
(1, 9, 8, 2, 0, 0, 2, 4, 4, 6, 2, 0), # 57
(2, 12, 4, 2, 1, 0, 2, 6, 8, 4, 4, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(2.649651558384548, 6.796460700757575, 7.9942360218509, 6.336277173913043, 7.143028846153846, 4.75679347826087), # 0
(2.6745220100478, 6.872041598712823, 8.037415537524994, 6.371564387077295, 7.196566506410256, 4.7551721391908215), # 1
(2.699108477221734, 6.946501402918069, 8.07957012282205, 6.406074879227053, 7.248974358974359, 4.753501207729468), # 2
(2.72339008999122, 7.019759765625, 8.120668982969152, 6.4397792119565205, 7.300204326923078, 4.7517809103260875), # 3
(2.747345978441128, 7.091736339085298, 8.160681323193373, 6.472647946859904, 7.350208333333334, 4.750011473429951), # 4
(2.7709552726563262, 7.162350775550646, 8.199576348721793, 6.504651645531401, 7.39893830128205, 4.748193123490338), # 5
(2.794197102721686, 7.231522727272727, 8.237323264781493, 6.535760869565218, 7.446346153846154, 4.746326086956522), # 6
(2.817050598722076, 7.299171846503226, 8.273891276599542, 6.565946180555556, 7.492383814102565, 4.744410590277778), # 7
(2.8394948907423667, 7.365217785493826, 8.309249589403029, 6.595178140096618, 7.537003205128205, 4.7424468599033816), # 8
(2.8615091088674274, 7.429580196496212, 8.343367408419024, 6.623427309782609, 7.580156249999999, 4.740435122282609), # 9
(2.8830723831821286, 7.492178731762065, 8.376213938874606, 6.65066425120773, 7.621794871794872, 4.738375603864734), # 10
(2.9041638437713395, 7.55293304354307, 8.407758385996857, 6.676859525966184, 7.661870993589743, 4.736268531099034), # 11
(2.92476262071993, 7.611762784090908, 8.437969955012854, 6.7019836956521734, 7.700336538461538, 4.734114130434782), # 12
(2.944847844112769, 7.668587605657268, 8.46681785114967, 6.726007321859903, 7.737143429487181, 4.731912628321256), # 13
(2.9643986440347283, 7.723327160493828, 8.494271279634388, 6.748900966183574, 7.772243589743589, 4.729664251207729), # 14
(2.9833941505706756, 7.775901100852272, 8.520299445694086, 6.770635190217391, 7.8055889423076925, 4.7273692255434785), # 15
(3.001813493805482, 7.826229078984287, 8.544871554555842, 6.791180555555555, 7.8371314102564105, 4.725027777777778), # 16
(3.019635803824017, 7.874230747141554, 8.567956811446729, 6.810507623792271, 7.866822916666667, 4.722640134359904), # 17
(3.03684021071115, 7.919825757575757, 8.589524421593831, 6.82858695652174, 7.894615384615387, 4.72020652173913), # 18
(3.053405844551751, 7.962933762538579, 8.609543590224222, 6.845389115338164, 7.9204607371794875, 4.717727166364734), # 19
(3.0693118354306894, 8.003474414281705, 8.62798352256498, 6.860884661835749, 7.944310897435898, 4.71520229468599), # 20
(3.084537313432836, 8.041367365056816, 8.644813423843189, 6.875044157608696, 7.9661177884615375, 4.712632133152174), # 21
(3.099061408643059, 8.076532267115601, 8.660002499285918, 6.887838164251208, 7.985833333333332, 4.710016908212561), # 22
(3.1128632511462295, 8.108888772709737, 8.673519954120252, 6.899237243357488, 8.003409455128205, 4.707356846316426), # 23
(3.125921971027217, 8.138356534090908, 8.685334993573264, 6.909211956521739, 8.018798076923076, 4.704652173913043), # 24
(3.1382166983708903, 8.164855203510802, 8.695416822872037, 6.917732865338165, 8.03195112179487, 4.701903117451691), # 25
(3.1497265632621207, 8.188304433221099, 8.703734647243644, 6.9247705314009655, 8.042820512820512, 4.699109903381642), # 26
(3.160430695785777, 8.208623875473483, 8.710257671915166, 6.930295516304349, 8.051358173076924, 4.696272758152174), # 27
(3.1703082260267292, 8.22573318251964, 8.714955102113683, 6.934278381642512, 8.057516025641025, 4.69339190821256), # 28
(3.1793382840698468, 8.239552006611252, 8.717796143066266, 6.936689689009662, 8.061245993589743, 4.690467580012077), # 29
(3.1875, 8.25, 8.71875, 6.9375, 8.0625, 4.6875), # 30
(3.1951370284526854, 8.258678799715907, 8.718034948671496, 6.937353656045752, 8.062043661347518, 4.683376259786773), # 31
(3.202609175191816, 8.267242897727273, 8.715910024154589, 6.93691748366013, 8.06068439716312, 4.677024758454107), # 32
(3.2099197969948845, 8.275691228693182, 8.712405570652175, 6.936195772058824, 8.058436835106383, 4.66850768365817), # 33
(3.217072250639386, 8.284022727272728, 8.70755193236715, 6.935192810457517, 8.05531560283688, 4.657887223055139), # 34
(3.224069892902813, 8.292236328124998, 8.701379453502415, 6.933912888071895, 8.051335328014185, 4.645225564301183), # 35
(3.23091608056266, 8.300330965909092, 8.69391847826087, 6.932360294117648, 8.046510638297873, 4.630584895052474), # 36
(3.2376141703964194, 8.308305575284091, 8.68519935084541, 6.9305393178104575, 8.040856161347516, 4.614027402965184), # 37
(3.2441675191815853, 8.31615909090909, 8.675252415458937, 6.9284542483660125, 8.034386524822695, 4.595615275695485), # 38
(3.250579483695652, 8.323890447443182, 8.664108016304347, 6.926109375, 8.027116356382978, 4.57541070089955), # 39
(3.2568534207161126, 8.331498579545455, 8.651796497584542, 6.923508986928105, 8.019060283687942, 4.5534758662335495), # 40
(3.26299268702046, 8.338982421874999, 8.638348203502416, 6.920657373366013, 8.010232934397163, 4.529872959353657), # 41
(3.269000639386189, 8.34634090909091, 8.62379347826087, 6.917558823529411, 8.000648936170213, 4.504664167916042), # 42
(3.2748806345907933, 8.353572975852272, 8.608162666062801, 6.914217626633987, 7.990322916666666, 4.477911679576878), # 43
(3.2806360294117645, 8.360677556818182, 8.591486111111111, 6.910638071895424, 7.979269503546099, 4.449677681992337), # 44
(3.286270180626598, 8.367653586647727, 8.573794157608697, 6.906824448529411, 7.967503324468085, 4.420024362818591), # 45
(3.291786445012788, 8.374500000000001, 8.555117149758455, 6.902781045751634, 7.955039007092199, 4.389013909711811), # 46
(3.297188179347826, 8.381215731534091, 8.535485431763284, 6.898512152777777, 7.941891179078015, 4.356708510328169), # 47
(3.3024787404092075, 8.387799715909091, 8.514929347826087, 6.894022058823529, 7.928074468085106, 4.323170352323839), # 48
(3.307661484974424, 8.39425088778409, 8.493479242149759, 6.889315053104576, 7.91360350177305, 4.288461623354989), # 49
(3.312739769820972, 8.40056818181818, 8.471165458937199, 6.884395424836602, 7.898492907801418, 4.252644511077794), # 50
(3.317716951726343, 8.406750532670454, 8.448018342391304, 6.879267463235294, 7.882757313829787, 4.215781203148426), # 51
(3.322596387468031, 8.412796875, 8.424068236714975, 6.87393545751634, 7.86641134751773, 4.177933887223055), # 52
(3.3273814338235295, 8.41870614346591, 8.39934548611111, 6.868403696895425, 7.849469636524823, 4.139164750957854), # 53
(3.332075447570333, 8.424477272727271, 8.373880434782608, 6.8626764705882355, 7.831946808510638, 4.099535982008995), # 54
(3.336681785485933, 8.430109197443182, 8.347703426932366, 6.856758067810458, 7.813857491134752, 4.05910976803265), # 55
(3.341203804347826, 8.435600852272726, 8.320844806763285, 6.8506527777777775, 7.795216312056738, 4.017948296684991), # 56
(3.345644860933504, 8.440951171875001, 8.29333491847826, 6.844364889705882, 7.77603789893617, 3.9761137556221886), # 57
(3.3500083120204605, 8.44615909090909, 8.265204106280192, 6.837898692810458, 7.756336879432624, 3.9336683325004165), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(2, 4, 3, 2, 1, 0, 9, 9, 6, 2, 1, 0), # 0
(4, 9, 4, 2, 3, 0, 11, 12, 9, 4, 2, 0), # 1
(8, 12, 9, 4, 5, 0, 17, 17, 14, 6, 3, 0), # 2
(9, 15, 17, 7, 5, 0, 25, 23, 14, 9, 6, 0), # 3
(13, 21, 17, 9, 7, 0, 27, 27, 14, 10, 7, 0), # 4
(16, 30, 21, 12, 8, 0, 30, 34, 17, 13, 8, 0), # 5
(20, 34, 28, 14, 9, 0, 35, 41, 22, 17, 8, 0), # 6
(22, 38, 29, 15, 9, 0, 40, 45, 25, 22, 8, 0), # 7
(25, 45, 32, 19, 10, 0, 45, 48, 27, 26, 8, 0), # 8
(27, 51, 34, 20, 12, 0, 53, 54, 29, 27, 9, 0), # 9
(29, 57, 37, 21, 14, 0, 57, 61, 36, 30, 12, 0), # 10
(30, 68, 39, 24, 16, 0, 59, 65, 41, 33, 13, 0), # 11
(34, 75, 42, 25, 18, 0, 60, 70, 44, 38, 15, 0), # 12
(37, 82, 50, 26, 21, 0, 66, 76, 46, 42, 19, 0), # 13
(40, 85, 52, 28, 22, 0, 71, 79, 54, 49, 19, 0), # 14
(44, 92, 55, 30, 23, 0, 78, 89, 59, 55, 20, 0), # 15
(44, 101, 61, 36, 25, 0, 87, 98, 63, 65, 20, 0), # 16
(48, 105, 64, 39, 26, 0, 94, 104, 67, 71, 21, 0), # 17
(56, 111, 67, 41, 27, 0, 98, 109, 71, 76, 24, 0), # 18
(57, 116, 72, 43, 29, 0, 99, 114, 73, 79, 27, 0), # 19
(61, 124, 79, 45, 32, 0, 102, 122, 77, 86, 29, 0), # 20
(65, 130, 84, 45, 33, 0, 104, 128, 82, 92, 30, 0), # 21
(68, 137, 90, 47, 33, 0, 107, 132, 86, 93, 34, 0), # 22
(73, 148, 95, 51, 34, 0, 110, 142, 88, 98, 35, 0), # 23
(77, 153, 103, 56, 37, 0, 112, 153, 91, 102, 35, 0), # 24
(82, 160, 104, 60, 38, 0, 115, 160, 92, 106, 37, 0), # 25
(86, 165, 110, 64, 40, 0, 119, 168, 96, 107, 38, 0), # 26
(87, 170, 118, 67, 43, 0, 127, 170, 100, 108, 39, 0), # 27
(89, 175, 121, 69, 44, 0, 131, 179, 100, 112, 41, 0), # 28
(94, 181, 127, 75, 48, 0, 141, 187, 104, 114, 43, 0), # 29
(95, 185, 130, 79, 48, 0, 145, 189, 108, 120, 44, 0), # 30
(97, 191, 133, 81, 52, 0, 150, 194, 117, 121, 48, 0), # 31
(98, 201, 137, 81, 53, 0, 153, 201, 122, 129, 50, 0), # 32
(102, 206, 140, 84, 54, 0, 162, 211, 126, 131, 52, 0), # 33
(107, 211, 145, 85, 58, 0, 170, 218, 127, 131, 55, 0), # 34
(110, 218, 148, 89, 58, 0, 173, 220, 132, 135, 57, 0), # 35
(115, 227, 152, 94, 59, 0, 180, 227, 135, 136, 57, 0), # 36
(117, 234, 158, 96, 62, 0, 185, 237, 138, 138, 58, 0), # 37
(119, 242, 162, 98, 64, 0, 192, 247, 141, 140, 61, 0), # 38
(121, 249, 166, 100, 68, 0, 197, 256, 149, 140, 64, 0), # 39
(124, 255, 170, 102, 70, 0, 198, 267, 157, 143, 67, 0), # 40
(126, 266, 177, 105, 74, 0, 201, 275, 158, 145, 70, 0), # 41
(132, 275, 179, 108, 75, 0, 208, 281, 163, 152, 73, 0), # 42
(138, 277, 182, 112, 75, 0, 213, 286, 168, 160, 73, 0), # 43
(139, 282, 186, 116, 76, 0, 218, 293, 173, 161, 73, 0), # 44
(141, 287, 192, 117, 79, 0, 222, 294, 178, 163, 75, 0), # 45
(147, 293, 197, 122, 82, 0, 226, 301, 181, 167, 75, 0), # 46
(148, 296, 202, 127, 83, 0, 233, 311, 188, 168, 76, 0), # 47
(149, 301, 204, 128, 85, 0, 235, 317, 193, 171, 76, 0), # 48
(152, 307, 210, 132, 85, 0, 237, 319, 196, 177, 76, 0), # 49
(157, 313, 214, 134, 86, 0, 241, 330, 199, 181, 77, 0), # 50
(161, 321, 219, 138, 87, 0, 248, 336, 202, 183, 79, 0), # 51
(165, 334, 221, 139, 89, 0, 254, 346, 206, 187, 79, 0), # 52
(170, 347, 226, 141, 93, 0, 256, 349, 210, 190, 81, 0), # 53
(174, 354, 229, 142, 94, 0, 258, 352, 212, 192, 82, 0), # 54
(180, 366, 233, 146, 96, 0, 260, 358, 216, 194, 83, 0), # 55
(183, 376, 236, 146, 100, 0, 261, 362, 223, 196, 84, 0), # 56
(184, 385, 244, 148, 100, 0, 263, 366, 227, 202, 86, 0), # 57
(186, 397, 248, 150, 101, 0, 265, 372, 235, 206, 90, 0), # 58
(186, 397, 248, 150, 101, 0, 265, 372, 235, 206, 90, 0), # 59
)
passenger_arriving_rate = (
(2.649651558384548, 5.43716856060606, 4.79654161311054, 2.534510869565217, 1.428605769230769, 0.0, 4.75679347826087, 5.714423076923076, 3.801766304347826, 3.1976944087403596, 1.359292140151515, 0.0), # 0
(2.6745220100478, 5.497633278970258, 4.822449322514997, 2.5486257548309177, 1.439313301282051, 0.0, 4.7551721391908215, 5.757253205128204, 3.8229386322463768, 3.2149662150099974, 1.3744083197425645, 0.0), # 1
(2.699108477221734, 5.557201122334455, 4.8477420736932295, 2.562429951690821, 1.4497948717948717, 0.0, 4.753501207729468, 5.799179487179487, 3.8436449275362317, 3.23182804912882, 1.3893002805836137, 0.0), # 2
(2.72339008999122, 5.6158078125, 4.872401389781491, 2.575911684782608, 1.4600408653846155, 0.0, 4.7517809103260875, 5.840163461538462, 3.863867527173912, 3.2482675931876606, 1.403951953125, 0.0), # 3
(2.747345978441128, 5.673389071268238, 4.896408793916024, 2.589059178743961, 1.4700416666666667, 0.0, 4.750011473429951, 5.880166666666667, 3.883588768115942, 3.2642725292773487, 1.4183472678170594, 0.0), # 4
(2.7709552726563262, 5.729880620440516, 4.919745809233076, 2.6018606582125603, 1.47978766025641, 0.0, 4.748193123490338, 5.91915064102564, 3.9027909873188404, 3.279830539488717, 1.432470155110129, 0.0), # 5
(2.794197102721686, 5.785218181818181, 4.942393958868895, 2.614304347826087, 1.4892692307692306, 0.0, 4.746326086956522, 5.957076923076922, 3.9214565217391306, 3.294929305912597, 1.4463045454545453, 0.0), # 6
(2.817050598722076, 5.83933747720258, 4.964334765959725, 2.626378472222222, 1.498476762820513, 0.0, 4.744410590277778, 5.993907051282052, 3.939567708333333, 3.309556510639817, 1.459834369300645, 0.0), # 7
(2.8394948907423667, 5.89217422839506, 4.985549753641817, 2.638071256038647, 1.5074006410256409, 0.0, 4.7424468599033816, 6.0296025641025635, 3.9571068840579704, 3.3236998357612113, 1.473043557098765, 0.0), # 8
(2.8615091088674274, 5.943664157196969, 5.006020445051414, 2.649370923913043, 1.5160312499999997, 0.0, 4.740435122282609, 6.064124999999999, 3.9740563858695652, 3.3373469633676094, 1.4859160392992423, 0.0), # 9
(2.8830723831821286, 5.993742985409652, 5.025728363324764, 2.660265700483092, 1.5243589743589743, 0.0, 4.738375603864734, 6.097435897435897, 3.990398550724638, 3.3504855755498424, 1.498435746352413, 0.0), # 10
(2.9041638437713395, 6.042346434834456, 5.044655031598114, 2.6707438103864733, 1.5323741987179484, 0.0, 4.736268531099034, 6.129496794871794, 4.0061157155797105, 3.3631033543987425, 1.510586608708614, 0.0), # 11
(2.92476262071993, 6.089410227272726, 5.062781973007712, 2.680793478260869, 1.5400673076923075, 0.0, 4.734114130434782, 6.16026923076923, 4.021190217391304, 3.375187982005141, 1.5223525568181815, 0.0), # 12
(2.944847844112769, 6.134870084525814, 5.080090710689802, 2.690402928743961, 1.547428685897436, 0.0, 4.731912628321256, 6.189714743589744, 4.035604393115942, 3.386727140459868, 1.5337175211314535, 0.0), # 13
(2.9643986440347283, 6.1786617283950624, 5.096562767780632, 2.699560386473429, 1.5544487179487176, 0.0, 4.729664251207729, 6.217794871794871, 4.049340579710144, 3.397708511853755, 1.5446654320987656, 0.0), # 14
(2.9833941505706756, 6.220720880681816, 5.112179667416451, 2.708254076086956, 1.5611177884615384, 0.0, 4.7273692255434785, 6.2444711538461535, 4.062381114130434, 3.408119778277634, 1.555180220170454, 0.0), # 15
(3.001813493805482, 6.26098326318743, 5.126922932733505, 2.716472222222222, 1.5674262820512819, 0.0, 4.725027777777778, 6.2697051282051275, 4.074708333333333, 3.4179486218223363, 1.5652458157968574, 0.0), # 16
(3.019635803824017, 6.299384597713242, 5.140774086868038, 2.724203049516908, 1.5733645833333332, 0.0, 4.722640134359904, 6.293458333333333, 4.0863045742753625, 3.4271827245786914, 1.5748461494283106, 0.0), # 17
(3.03684021071115, 6.3358606060606055, 5.153714652956299, 2.7314347826086958, 1.578923076923077, 0.0, 4.72020652173913, 6.315692307692308, 4.097152173913043, 3.435809768637532, 1.5839651515151514, 0.0), # 18
(3.053405844551751, 6.370347010030863, 5.165726154134533, 2.738155646135265, 1.5840921474358973, 0.0, 4.717727166364734, 6.336368589743589, 4.107233469202898, 3.4438174360896885, 1.5925867525077158, 0.0), # 19
(3.0693118354306894, 6.402779531425363, 5.1767901135389875, 2.7443538647342995, 1.5888621794871793, 0.0, 4.71520229468599, 6.355448717948717, 4.11653079710145, 3.4511934090259917, 1.6006948828563408, 0.0), # 20
(3.084537313432836, 6.433093892045452, 5.186888054305913, 2.750017663043478, 1.5932235576923073, 0.0, 4.712632133152174, 6.372894230769229, 4.125026494565217, 3.4579253695372754, 1.608273473011363, 0.0), # 21
(3.099061408643059, 6.46122581369248, 5.19600149957155, 2.7551352657004826, 1.5971666666666662, 0.0, 4.710016908212561, 6.388666666666665, 4.132702898550725, 3.464000999714367, 1.61530645342312, 0.0), # 22
(3.1128632511462295, 6.487111018167789, 5.204111972472151, 2.759694897342995, 1.6006818910256408, 0.0, 4.707356846316426, 6.402727564102563, 4.139542346014493, 3.4694079816481005, 1.6217777545419472, 0.0), # 23
(3.125921971027217, 6.5106852272727265, 5.211200996143958, 2.763684782608695, 1.6037596153846152, 0.0, 4.704652173913043, 6.415038461538461, 4.1455271739130435, 3.474133997429305, 1.6276713068181816, 0.0), # 24
(3.1382166983708903, 6.531884162808641, 5.217250093723222, 2.7670931461352657, 1.606390224358974, 0.0, 4.701903117451691, 6.425560897435896, 4.150639719202899, 3.4781667291488145, 1.6329710407021603, 0.0), # 25
(3.1497265632621207, 6.550643546576878, 5.222240788346187, 2.7699082125603858, 1.6085641025641022, 0.0, 4.699109903381642, 6.434256410256409, 4.154862318840579, 3.4814938588974575, 1.6376608866442195, 0.0), # 26
(3.160430695785777, 6.566899100378786, 5.226154603149099, 2.772118206521739, 1.6102716346153847, 0.0, 4.696272758152174, 6.441086538461539, 4.158177309782609, 3.484103068766066, 1.6417247750946966, 0.0), # 27
(3.1703082260267292, 6.580586546015712, 5.228973061268209, 2.7737113526570045, 1.6115032051282048, 0.0, 4.69339190821256, 6.446012820512819, 4.160567028985507, 3.4859820408454727, 1.645146636503928, 0.0), # 28
(3.1793382840698468, 6.591641605289001, 5.230677685839759, 2.7746758756038647, 1.6122491987179486, 0.0, 4.690467580012077, 6.448996794871794, 4.162013813405797, 3.487118457226506, 1.6479104013222503, 0.0), # 29
(3.1875, 6.6, 5.23125, 2.775, 1.6124999999999998, 0.0, 4.6875, 6.449999999999999, 4.1625, 3.4875, 1.65, 0.0), # 30
(3.1951370284526854, 6.606943039772726, 5.230820969202898, 2.7749414624183006, 1.6124087322695035, 0.0, 4.683376259786773, 6.449634929078014, 4.162412193627451, 3.4872139794685983, 1.6517357599431814, 0.0), # 31
(3.202609175191816, 6.613794318181818, 5.229546014492753, 2.7747669934640515, 1.6121368794326238, 0.0, 4.677024758454107, 6.448547517730495, 4.162150490196078, 3.4863640096618354, 1.6534485795454545, 0.0), # 32
(3.2099197969948845, 6.620552982954545, 5.227443342391305, 2.774478308823529, 1.6116873670212764, 0.0, 4.66850768365817, 6.446749468085105, 4.161717463235294, 3.4849622282608697, 1.6551382457386363, 0.0), # 33
(3.217072250639386, 6.627218181818182, 5.224531159420289, 2.7740771241830067, 1.6110631205673758, 0.0, 4.657887223055139, 6.444252482269503, 4.16111568627451, 3.4830207729468596, 1.6568045454545455, 0.0), # 34
(3.224069892902813, 6.633789062499998, 5.220827672101449, 2.773565155228758, 1.6102670656028368, 0.0, 4.645225564301183, 6.441068262411347, 4.160347732843137, 3.480551781400966, 1.6584472656249996, 0.0), # 35
(3.23091608056266, 6.6402647727272734, 5.2163510869565215, 2.7729441176470586, 1.6093021276595745, 0.0, 4.630584895052474, 6.437208510638298, 4.159416176470589, 3.477567391304347, 1.6600661931818184, 0.0), # 36
(3.2376141703964194, 6.6466444602272725, 5.211119610507246, 2.7722157271241827, 1.6081712322695032, 0.0, 4.614027402965184, 6.432684929078013, 4.158323590686274, 3.474079740338164, 1.6616611150568181, 0.0), # 37
(3.2441675191815853, 6.652927272727272, 5.205151449275362, 2.7713816993464047, 1.6068773049645388, 0.0, 4.595615275695485, 6.427509219858155, 4.157072549019607, 3.4701009661835744, 1.663231818181818, 0.0), # 38
(3.250579483695652, 6.659112357954545, 5.198464809782608, 2.7704437499999996, 1.6054232712765955, 0.0, 4.57541070089955, 6.421693085106382, 4.155665625, 3.4656432065217384, 1.6647780894886361, 0.0), # 39
(3.2568534207161126, 6.6651988636363635, 5.191077898550724, 2.7694035947712417, 1.6038120567375882, 0.0, 4.5534758662335495, 6.415248226950353, 4.154105392156863, 3.4607185990338163, 1.6662997159090909, 0.0), # 40
(3.26299268702046, 6.671185937499998, 5.1830089221014495, 2.768262949346405, 1.6020465868794325, 0.0, 4.529872959353657, 6.40818634751773, 4.152394424019608, 3.455339281400966, 1.6677964843749995, 0.0), # 41
(3.269000639386189, 6.677072727272728, 5.174276086956522, 2.767023529411764, 1.6001297872340425, 0.0, 4.504664167916042, 6.40051914893617, 4.150535294117646, 3.4495173913043478, 1.669268181818182, 0.0), # 42
(3.2748806345907933, 6.682858380681817, 5.164897599637681, 2.7656870506535944, 1.5980645833333331, 0.0, 4.477911679576878, 6.3922583333333325, 4.148530575980392, 3.4432650664251203, 1.6707145951704543, 0.0), # 43
(3.2806360294117645, 6.688542045454545, 5.154891666666667, 2.7642552287581696, 1.5958539007092198, 0.0, 4.449677681992337, 6.383415602836879, 4.146382843137254, 3.4365944444444443, 1.6721355113636363, 0.0), # 44
(3.286270180626598, 6.694122869318181, 5.144276494565218, 2.7627297794117642, 1.593500664893617, 0.0, 4.420024362818591, 6.374002659574468, 4.144094669117647, 3.4295176630434785, 1.6735307173295453, 0.0), # 45
(3.291786445012788, 6.6996, 5.133070289855073, 2.761112418300653, 1.5910078014184397, 0.0, 4.389013909711811, 6.364031205673759, 4.14166862745098, 3.4220468599033818, 1.6749, 0.0), # 46
(3.297188179347826, 6.704972585227273, 5.12129125905797, 2.759404861111111, 1.588378235815603, 0.0, 4.356708510328169, 6.353512943262412, 4.139107291666666, 3.4141941727053133, 1.6762431463068181, 0.0), # 47
(3.3024787404092075, 6.710239772727273, 5.108957608695651, 2.757608823529411, 1.5856148936170211, 0.0, 4.323170352323839, 6.3424595744680845, 4.136413235294117, 3.4059717391304343, 1.6775599431818182, 0.0), # 48
(3.307661484974424, 6.715400710227271, 5.096087545289855, 2.75572602124183, 1.5827207003546098, 0.0, 4.288461623354989, 6.330882801418439, 4.133589031862745, 3.3973916968599034, 1.6788501775568176, 0.0), # 49
(3.312739769820972, 6.720454545454543, 5.082699275362319, 2.7537581699346405, 1.5796985815602835, 0.0, 4.252644511077794, 6.318794326241134, 4.130637254901961, 3.388466183574879, 1.6801136363636358, 0.0), # 50
(3.317716951726343, 6.725400426136363, 5.068811005434783, 2.7517069852941174, 1.5765514627659571, 0.0, 4.215781203148426, 6.306205851063829, 4.127560477941176, 3.3792073369565214, 1.6813501065340908, 0.0), # 51
(3.322596387468031, 6.730237499999999, 5.054440942028985, 2.7495741830065357, 1.573282269503546, 0.0, 4.177933887223055, 6.293129078014184, 4.124361274509804, 3.3696272946859898, 1.6825593749999999, 0.0), # 52
(3.3273814338235295, 6.7349649147727275, 5.039607291666666, 2.7473614787581697, 1.5698939273049646, 0.0, 4.139164750957854, 6.279575709219858, 4.121042218137255, 3.359738194444444, 1.6837412286931819, 0.0), # 53
(3.332075447570333, 6.739581818181817, 5.024328260869565, 2.745070588235294, 1.5663893617021276, 0.0, 4.099535982008995, 6.2655574468085105, 4.117605882352941, 3.3495521739130427, 1.6848954545454542, 0.0), # 54
(3.336681785485933, 6.744087357954545, 5.008622056159419, 2.7427032271241827, 1.5627714982269503, 0.0, 4.05910976803265, 6.251085992907801, 4.114054840686275, 3.3390813707729463, 1.6860218394886362, 0.0), # 55
(3.341203804347826, 6.74848068181818, 4.9925068840579705, 2.740261111111111, 1.5590432624113475, 0.0, 4.017948296684991, 6.23617304964539, 4.110391666666667, 3.328337922705314, 1.687120170454545, 0.0), # 56
(3.345644860933504, 6.752760937500001, 4.976000951086956, 2.7377459558823527, 1.5552075797872338, 0.0, 3.9761137556221886, 6.220830319148935, 4.106618933823529, 3.317333967391304, 1.6881902343750002, 0.0), # 57
(3.3500083120204605, 6.756927272727271, 4.959122463768115, 2.7351594771241827, 1.5512673758865245, 0.0, 3.9336683325004165, 6.205069503546098, 4.102739215686275, 3.3060816425120767, 1.6892318181818178, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
71, # 1
)
| 112.471642 | 215 | 0.727586 |
acdfde94a8197159c7eaef4c002ca194ef474b55 | 20 | py | Python | imaginarium/storage/user/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/storage/user/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/storage/user/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | tablename = 'users'
| 10 | 19 | 0.7 |
acdfdf65879165b481901418bd55522afd837105 | 96 | py | Python | src/softfab/docs/howto/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 20 | 2019-02-07T17:03:04.000Z | 2020-03-16T20:45:19.000Z | src/softfab/docs/howto/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 36 | 2019-02-11T08:57:16.000Z | 2020-09-29T05:32:08.000Z | src/softfab/docs/howto/__init__.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
button = 'How-to'
children = ('ci',)
icon = 'IconDocs'
| 16 | 39 | 0.666667 |
acdfdfbde6d8ebe5d052c29decc706fc09d6efeb | 11,307 | py | Python | src/unwetter/generate/__init__.py | corneliusroemer/unwetter | 9e27d0382483ba4f238db555c4ec4742f8ce08ae | [
"MIT"
] | 1 | 2020-06-27T09:59:09.000Z | 2020-06-27T09:59:09.000Z | src/unwetter/generate/__init__.py | corneliusroemer/unwetter | 9e27d0382483ba4f238db555c4ec4742f8ce08ae | [
"MIT"
] | null | null | null | src/unwetter/generate/__init__.py | corneliusroemer/unwetter | 9e27d0382483ba4f238db555c4ec4742f8ce08ae | [
"MIT"
] | null | null | null | #!/user/bin/env python3.6
import os
import re
from .. import db
from .blocks import *
from .helpers import rreplace, upper_first
from . import urls
from ..config import SEVERITY_FILTER, STATES_FILTER
def describe_new_event(event):
    """Build the full WINA body text for a newly issued warning.

    Renders the complete editorial message (validity, affected regions,
    weather situation, auto-generated text suggestions, DWD contact info)
    from the ``event`` dict.  Used for ``msg_type == 'Alert'`` and
    'UpdateAlert' events (see :func:`description`).
    """
    text = f'''
{title(event, variant='wina_body')}
+++ Gültigkeit +++
{upper_first(dates(event))}.
Regionale Zuordnung: {region_list(event)}
Betroffene Kreise und Städte: {district_list(event)}
Karten-Grafik Download:
{urls.events(event)}
+++ Wetterlage +++
Warnstufe: {severities[event['severity']]}
{event['description']}
Warnung vor: {parameters(event)}
Verhaltenshinweise: {event['instruction'] or ''}
+++ Textvorschläge +++
HINWEIS: Textvorschläge werden nach redaktionellen Vorgaben automatisch generiert.
Je nach Unwetterlage ist es nötig, sie noch einmal passgenau zu überarbeiten und
dabei auch die eventuellen Warnungen vor verschiedenen Unwettergebieten zusammenzufassen.
TWEET: {tweet(event)}
TV-CRAWL: {crawl(event)}
RADIO: {radio(event)}
+++ DWD +++
Die Eilmeldung des DWD erreicht OpenMedia in der Regel wenige Minuten nach dieser Meldung.
(In einigen Fällen, z.B. kurze Gültigkeit und/oder kleines Gebiet, kann eine Meldung des DWD entfallen!)
Website des Deutschen Wetterdienstes:
https://www.dwd.de/DE/wetter/warnungen/warnWetter_node.html
Telefon DWD: 069-8062-6900
+++ Allgemeine Information +++
Die aufgeführten Informationen dürfen als Quelle zur Abwicklung des Unwetter-Workflows genutzt werden.
Die Bereitstellung dieser Information erfolgt durch den Unwetter-Warnassistenten (UWA), ein Produkt des Newsrooms.
Der UWA wird aktiv weiterentwickelt.
Kontakt und weitere Informationen: {os.environ["WDR_PROJECT_INFO_URL"]}
'''.strip()
    # Drop optional labels whose value rendered empty, so no dangling
    # "Label: " headings remain.
    # NOTE(review): the pattern expects the label to be followed by a blank
    # line ("... \n\n"); confirm the template actually produces one here.
    for optional in ['Regionale Zuordnung:', 'Warnung vor:', 'Verhaltenshinweise:']:
        text = text.replace(f'{optional} \n\n', '')
    return text
def describe_update(event):
    """Build the WINA body text for an update/cancel of earlier warnings.

    Looks up all previously *published* messages this event changes
    (``event['has_changes']``) and prepends, per predecessor, a change
    section (cancellation, downgrade, moved out of the filtered states,
    or a plain diff).  The remainder mirrors :func:`describe_new_event`.
    """
    # Only compare against predecessors that were actually published.
    old_events = list(db.by_ids([
        change_set['id'] for change_set
        in event['has_changes'] if change_set['published']
    ]))
    change_details = []
    for old_event in old_events:
        old_time = local_time(old_event['sent']).strftime("%H:%M %d.%m.%y")
        if event['msg_type'] == 'Cancel' or event['response_type'] == 'AllClear':
            # Warning withdrawn / all-clear: no diff needed.
            change_title = 'Aufhebung von'
            the_changes = ''
        elif event['special_type'] == 'Irrelevant' and event['severity'] not in SEVERITY_FILTER:
            # Severity dropped below the configured filter: downgrade.
            change_title = 'Herabstufung von'
            the_changes = (changes(event, old_event) if old_event else 'Unbekannt')
        elif event['special_type'] == 'Irrelevant' and not any(state in event['states'] for state in STATES_FILTER):
            # Storm cell left the filtered state(s) entirely.
            # (Fixed mojibake: 'kônnen' -> 'können'.)
            change_title = 'Änderungen zur'
            the_changes = f'Die Unwetterzelle befindet sich nicht ' \
                f'mehr im Bundesland {", ".join(STATES_FILTER)}.\n\n ' \
                f'Andere Unwetterregionen können noch in NRW aktiv sein! ' \
                f'Vergleiche dazu die UWA und DWD Karten.'
        else:
            # Regular update: show the generated diff.
            change_title = 'Änderungen zur'
            the_changes = (changes(event, old_event) if old_event else 'Unbekannt') + '\n'
        change_details.append(f'''
+++ {change_title} Meldung mit Agenturzeit {old_time} +++
{the_changes}'''.strip())
    joined = '\n\n'.join(change_details)
    all_changes = f'\n{joined}\n' if change_details else ''
    text = f'''
{title(event, variant='wina_body')}
{all_changes}
+++ Gültigkeit +++
{upper_first(dates(event))}.
Regionale Zuordnung: {region_list(event)}
Betroffene Kreise und Städte: {district_list(event)}
Karten-Grafik Download:
{urls.events(event)}
+++ Wetterlage +++
Warnstufe: {severities[event['severity']]}
{event['description']}
Warnung vor: {parameters(event)}
Verhaltenshinweise: {event['instruction'] or ''}
+++ Textvorschläge +++
HINWEIS: Textvorschläge werden nach redaktionellen Vorgaben automatisch generiert.
Je nach Unwetterlage ist es nötig, sie noch einmal passgenau zu überarbeiten
und dabei auch die eventuellen Warnungen vor verschiedenen Unwettergebieten zusammenzufassen.
TWEET: {tweet(event)}
TV-CRAWL: {crawl(event)}
RADIO: {radio(event)}
+++ DWD +++
Die Eilmeldung des DWD erreicht OpenMedia in der Regel wenige Minuten nach dieser Meldung.
(In einigen Fällen, z.B. kurze Gültigkeit und/oder kleines Gebiet, kann eine Meldung des DWD entfallen!)
Website des Deutschen Wetterdienstes:
https://www.dwd.de/DE/wetter/warnungen/warnWetter_node.html
Telefon DWD: 069-8062-6900
+++ Allgemeine Information +++
Die aufgeführten Informationen dürfen als Quelle zur Abwicklung des Unwetter-Workflows genutzt werden.
Die Bereitstellung dieser Information erfolgt durch den Unwetter-Warnassistenten (UWA), ein Produkt des Newsrooms.
Der UWA wird aktiv weiterentwickelt.
Kontakt und weitere Informationen: {os.environ["WDR_PROJECT_INFO_URL"]}
'''.strip()
    # Drop optional labels whose value rendered empty (see describe_new_event).
    for optional in ['Regionale Zuordnung:', 'Warnung vor:', 'Verhaltenshinweise:']:
        text = text.replace(f'{optional} \n\n', '')
    return text
def description(event, short=False):
    """Render the main WINA body text for *event*.

    Dispatches to the "new event" template for alerts (and updates that
    behave like alerts), otherwise to the update template.  With
    ``short=True`` the auto-generated text-suggestion section is cut off.
    """
    treat_as_new = (
        event['msg_type'] == 'Alert' or event['special_type'] == 'UpdateAlert'
    )
    render = describe_new_event if treat_as_new else describe_update
    text = render(event)
    if short:
        # Everything from this marker onward is generated suggestion copy.
        text = text.split('+++ Textvorschläge +++')[0].strip()
    return text
def crawl(event):
    """Generate the TV-crawl text suggestion for *event*.

    Chooses district names for small events (< 3 districts) and the
    region wording otherwise, normalizes the event phrase, and fills a
    cancel / all-clear / regular template.
    """
    # 'Update' messages get an "aktualisierte" prefix; severity decides
    # between "Unwetterwarnung" and plain "Wetterwarnung".
    prefix = 'aktualisierte' if event['msg_type'] == 'Update' else ''
    warning = 'Unwetterwarnung' if event['severity'] in ('Severe', 'Extreme') else 'Wetterwarnung'
    if len(event['districts']) < 3:
        location = district_list(event)
    else:
        location = region_list(event, accusative=True)
        if not location:
            # No region mapping available — fall back to district names.
            location = district_list(event)
    # "A, B, C" -> "A, B und C"
    location = rreplace(location, ', ', ' und ', 1)
    # De-shout ALL-CAPS words and lowercase intensity adjectives.
    text = ' '.join(
        word.capitalize() if word.isupper() else word
        for word in event['event'].split()
    ).replace('Schwer', 'schwer')
    for lower_word in ('Heftig', 'Schwer', 'Starke', 'Extrem'):
        text = text.replace(lower_word, lower_word.lower())
    text = upper_first(text)
    if event['msg_type'] == 'Cancel':
        the_crawl = (f'Amtliche {warning} des Deutschen Wetterdienstes für '
                     f'{location} zurückgezogen.')
    elif event['response_type'] == 'AllClear':
        the_crawl = (f'Amtliche {warning} des Deutschen Wetterdienstes für '
                     f'{location} vorzeitig aufgehoben.')
    else:
        # NOTE(review): there is no space between "Gültig {dates}." and the
        # bracketed editorial placeholder — confirm this is intentional.
        the_crawl = (f'{prefix} amtliche {warning} des Deutschen Wetterdienstes für '
                     f'{location}. {text} möglich. Gültig {dates(event)}.'
                     '[Weitere Infos (nächste TV-Sendung, NUR wenn weniger als 2h bis '
                     'Sendungsbeginn), auf wdr.de und im Videotext auf S. 192.]').strip()
    # Strip the "no end of validity" filler emitted by dates().
    the_crawl = the_crawl.replace(' (kein Ende der Gültigkeit angegeben)', '')
    return upper_first(the_crawl)
def tweet(event):
    """Generate a tweet suggestion (max. 280 characters) for *event*.

    Regular warnings walk a fallback ladder of templates (hashtagged
    districts → plain districts → hashtagged regions → plain regions)
    until one fits into 280 characters; cancellations and all-clears use
    a single short template.
    """
    # 'Update' messages get an "aktualisierte" prefix; severity decides
    # between "Unwetterwarnung" and plain "Wetterwarnung".
    prefix = 'aktualisierte' if event['msg_type'] == 'Update' else ''
    warning = 'Unwetterwarnung' if event['severity'] in ('Severe', 'Extreme') else 'Wetterwarnung'
    # De-shout ALL-CAPS words and lowercase intensity adjectives.
    text = ' '.join(
        word.capitalize() if word.isupper() else word
        for word in event['event'].split()
    ).replace('Schwer', 'schwer')
    for lower_word in ('Heftig', 'Schwer', 'Starke', 'Extrem'):
        text = text.replace(lower_word, lower_word.lower())
    text = upper_first(text)
    districts = district_list(event)
    districts = rreplace(districts, ', ', ' und ', 1)
    regions_ = region_list(event, accusative=True)
    regions_ = rreplace(regions_, ', ', ' und ', 1)
    dates_ = dates(event)
    if event['msg_type'] == 'Cancel':
        candidates = [
            'Amtliche #{warning} des Deutschen Wetterdienstes für '
            '{areas} zurückgezogen.',
        ]
    elif event['response_type'] == 'AllClear':
        candidates = [
            'Amtliche #{warning} des Deutschen Wetterdienstes für '
            '{areas} vorzeitig aufgehoben.',
        ]
    else:
        candidates = [
            '{prefix} amtliche #{warning} des Deutschen Wetterdienstes für '
            '{districts}. {text} möglich. Gültig {dates}.',
            '{prefix} amtliche {warning} des Deutschen Wetterdienstes für '
            '{districts}. {text} möglich. Gültig {dates}.',
            '{prefix} amtliche #{warning} des Deutschen Wetterdienstes für '
            '{regions}. {text} möglich. Gültig {dates}.',
            '{prefix} amtliche {warning} des Deutschen Wetterdienstes für '
            '{regions}. {text} möglich. Gültig {dates}.',
        ]
    for candidate in candidates:
        the_tweet = candidate.format(
            prefix=prefix,
            warning=warning,
            districts=districts,
            regions=regions_,
            # BUGFIX: the Cancel/AllClear templates reference '{areas}',
            # which was never passed before and raised KeyError in format().
            # Unused keyword args are ignored by str.format for the others.
            areas=districts,
            text=text,
            dates=dates_)
        the_tweet = the_tweet.replace(' (kein Ende der Gültigkeit angegeben)', '').strip()
        if len(the_tweet) <= 280:
            break
    else:
        # for/else: no candidate fit into the tweet length limit.
        the_tweet = 'Tweet konnte nicht generiert werden, da zu lang'
    return upper_first(the_tweet)
def radio(event):
    """Generate the radio-announcement text suggestion for *event*.

    Picks district names for small events and the region wording
    otherwise, extracts the warning kind from the headline, and appends
    behavioural advice plus per-parameter detail sentences.
    """
    districts = district_list(event)
    districts = rreplace(districts, ', ', ' und ', 1)
    regions_ = region_list(event, accusative=True)
    regions_ = rreplace(regions_, ', ', ' und ', 1)
    # BUGFIX: previously compared len(district_list(event)) — the character
    # length of the formatted string — so the district wording was never
    # used.  Count the districts themselves, consistent with crawl().
    if len(event['districts']) <= 3:
        regions = districts
    else:
        regions = regions_
    # Headline is "... vor <kind>"; keep only the kind.
    kind = re.sub(r'.*vor ', '', event['headline'])
    if event['instruction'] and 'Freien!' in event['instruction']:
        instruction = '\nVermeiden Sie dann möglichst den Aufenthalt im Freien.\n'
    else:
        instruction = ''
    if event['severity'] == 'Extreme':
        # Pull the parameter word following each "EXTREM" marker, cutting at
        # the first ','/'und'/'mit' delimiter.
        start_indexes = [m.start() for m in re.finditer('EXTREM', event["event"])]
        extreme_parameters = []
        for index in start_indexes:
            param = event['event'][index:]
            for delim in [',', 'und', 'mit']:
                param = param.split(delim)[0]
            extreme_parameters.append(param.strip().split(' ')[-1])
        extreme_text = f'\nBei der Warnung vor {" und ".join(extreme_parameters)}' \
            f' gilt im Moment die HÖCHSTMÖGLICHE WARNSTUFE.\n'
    else:
        extreme_text = ''
    parameter_text = ''
    # Strip the bracket markup the DWD puts around parameter values.
    params = {param: (value.replace("[", "").replace("]", "")) for param, value in event['parameters'].items()}
    for param, value in params.items():
        if param == 'Niederschlag':
            rain = value.replace('in 1h', 'pro Stunde').replace('in 6h', 'in 6 Stunden')
            parameter_text += f"\nEs kann bis zu {rain} regnen."
        if param == 'Böen':
            parameter_text += f"\nDie Sturmböen können Geschwindigkeiten von bis zu {value} erreichen."
        if param == 'Schneefall':
            parameter_text += f"\nEs können bis zu {value} Schnee pro Stunde fallen."
    radio_text = f'''
Das Wetter in Nordrhein-Westfalen mit einer Unwetterwarnung des Deutschen Wetterdienstes – und zwar für {regions}.
Dort kommt es zu {kind}.
{extreme_text}{instruction}{parameter_text}
Mehr Informationen zur Unwetterlage in NRW gibt es (hier entsprechenden Teaser einfügen (z.B. wdr.de, TV-Sondersendung.)
'''.strip()
    return radio_text
| 32.398281 | 120 | 0.64889 |
acdfe22a778719e9c21c7cf194a144da35a19cd9 | 3,088 | py | Python | run.py | googleinterns/geobeam | 846010085a58a5ee87f45e0be38a128216dc0eca | [
"Apache-2.0"
] | 1 | 2020-06-24T20:29:58.000Z | 2020-06-24T20:29:58.000Z | run.py | googleinterns/geobeam | 846010085a58a5ee87f45e0be38a128216dc0eca | [
"Apache-2.0"
] | 1 | 2020-06-26T20:01:30.000Z | 2020-07-23T15:20:33.000Z | run.py | googleinterns/geobeam | 846010085a58a5ee87f45e0be38a128216dc0eca | [
"Apache-2.0"
] | 1 | 2020-06-25T16:12:19.000Z | 2020-06-25T16:12:19.000Z | #!/usr/bin/env python3
import configparser
import os
import sys
from geobeam.simulations import SimulationSetBuilder
from geobeam.generate_route import TimedRoute
from geobeam import gps_utils
# speed used as default config parser value if not specified by the user
DEFAULT_SPEED = "1.4" # meters/sec
DEFAULT_FREQUENCY = 10 # Hz
def main(config_file_name):
    """Create and run simulation set based on user specified config file.

    Each section of the INI file describes one simulation: either a
    dynamic route (optionally generating a new user-motion file from a
    GPX track or a start/end coordinate pair) or a static position.
    Aborts on the first missing config option.

    Args:
      config_file_name: string, name of file in simulation_configs folder
        to read from
    """
    config = configparser.ConfigParser()
    # configparser values are strings; DEFAULT section supplies the fallback
    # speed for every simulation section.
    config['DEFAULT']['Speed'] = DEFAULT_SPEED
    config_file_path = os.path.abspath("simulation_configs/" + config_file_name)
    config.read(config_file_path)
    sections = config.sections()
    simulation_set_builder = SimulationSetBuilder()
    for simulation in sections:
        try:
            # Both are optional; None lets the builder use its own defaults.
            run_duration = config.getint(simulation, "RunDuration", fallback=None)
            gain = config.getint(simulation, "Gain", fallback=None)
            # Dynamic Simulation
            if config.getboolean(simulation, "Dynamic"):
                file_name = config.get(simulation, "FileName")
                file_path = os.path.abspath("geobeam/user_motion_files/" + file_name)
                # Creating New Route File
                if config.getboolean(simulation, "CreateFile"):
                    speed = config.getfloat(simulation, "Speed")
                    frequency = DEFAULT_FREQUENCY
                    if config.has_option(simulation, "GpxSourcePath"):
                        # Route derived from an existing GPX track.
                        gpx_source_path = config.get(simulation, "GpxSourcePath")
                        user_motion = TimedRoute.from_gpx(gpx_source_path, speed, frequency)
                    else:
                        # Route computed between two coordinates.
                        start_latitude = config.getfloat(simulation, "StartLatitude")
                        start_longitude = config.getfloat(simulation, "StartLongitude")
                        end_latitude = config.getfloat(simulation, "EndLatitude")
                        end_longitude = config.getfloat(simulation, "EndLongitude")
                        location1 = gps_utils.Location(start_latitude, start_longitude)
                        location2 = gps_utils.Location(end_latitude, end_longitude)
                        user_motion = TimedRoute.from_start_and_end(location1, location2, speed, frequency)
                    # Writes into the user_motion_files folder referenced by file_path.
                    user_motion.write_route(file_name)
                simulation_set_builder.add_dynamic_route(file_path,
                                                         run_duration=run_duration,
                                                         gain=gain)
            # Static Simulation
            else:
                latitude = config.getfloat(simulation, "Latitude")
                longitude = config.getfloat(simulation, "Longitude")
                simulation_set_builder.add_static_route(latitude,
                                                        longitude,
                                                        run_duration=run_duration,
                                                        gain=gain)
        except configparser.NoOptionError as err:
            # A required option is missing: report and abort without running.
            print("Error in reading value from configuration file: %s" % err)
            return
    simulation_set = simulation_set_builder.build()
    simulation_set.run_simulations()
if __name__ == "__main__":
sys.exit(main(sys.argv[1]))
| 38.123457 | 95 | 0.661917 |
acdfe2e7c82ab444d82dd2504338d36bfec3dd4a | 7,574 | py | Python | procgen/env.py | AGKhalil/procgen | 3c9e8a50a9bd535175001f1978d29b3412394bcd | [
"MIT"
] | null | null | null | procgen/env.py | AGKhalil/procgen | 3c9e8a50a9bd535175001f1978d29b3412394bcd | [
"MIT"
] | null | null | null | procgen/env.py | AGKhalil/procgen | 3c9e8a50a9bd535175001f1978d29b3412394bcd | [
"MIT"
] | null | null | null | import os
import random
from typing import Sequence, Optional, List
import gym3
from gym3.libenv import CEnv
import numpy as np
from .build import build
# Directory containing this module; used to locate bundled assets/libraries.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Upper bound (1 MiB) on a serialized per-environment state buffer,
# used by BaseProcgenEnv.get_state().
MAX_STATE_SIZE = 2 ** 20
# All registered procgen game names.
ENV_NAMES = [
    "bigfish",
    "bossfight",
    "caveflyer",
    "chaser",
    "climber",
    "coinrun",
    "dodgeball",
    "fruitbot",
    "heist",
    "jumper",
    "leaper",
    "maze",
    "miner",
    "ninja",
    "plunder",
    "starpilot",
]
# Fixed level seed used as start_level when distribution_mode="exploration";
# only these games support exploration mode.
EXPLORATION_LEVEL_SEEDS = {
    "coinrun": 1949448038,
    "caveflyer": 1259048185,
    "leaper": 1318677581,
    "jumper": 1434825276,
    "maze": 158988835,
    "heist": 876640971,
    "climber": 1561126160,
    "ninja": 1123500215,
}
# should match DistributionMode in game.h, except for 'exploration' which is handled by Python
DISTRIBUTION_MODE_DICT = {
    "easy": 0,
    "hard": 1,
    "extreme": 2,
    "memory": 10,
    "exploration": 20,
}
def create_random_seed():
    """Pick a fresh 31-bit seed from the OS entropy source.

    Under MPI the seed is aligned to the communicator size and offset by
    the process rank, so every rank ends up with a distinct value.
    """
    seed = random.SystemRandom().randint(0, 2 ** 31 - 1)
    try:
        from mpi4py import MPI  # optional dependency
    except ModuleNotFoundError:
        return seed
    world = MPI.COMM_WORLD
    # Align to a multiple of the world size, then add the rank: ranks in the
    # same communicator can never collide on a seed.
    return seed - (seed % world.size) + world.rank
class BaseProcgenEnv(CEnv):
    """
    Base procedurally generated environment.

    Thin wrapper around the native ``libenv`` shared library: the
    constructor locates a prebuilt binary (or compiles one), assembles the
    option dict consumed by the C side, and lets ``gym3.libenv.CEnv``
    drive it through cffi.  Subclasses supply game-specific ``options``
    (see ProcgenGym3Env).
    """
    def __init__(
        self,
        num,
        env_name,
        options,
        wandb,  # NOTE(review): accepted but never used in this class — confirm intent
        debug=False,
        rand_seed=None,
        num_levels=0,
        start_level=0,
        use_sequential_levels=False,
        debug_mode=0,
        resource_root=None,
        num_threads=4,
        render_mode=None,
    ):
        # Default to the assets bundled with the package; trailing separator
        # is appended because the C side treats this as a directory prefix.
        if resource_root is None:
            resource_root = os.path.join(SCRIPT_DIR, "data", "assets") + os.sep
            assert os.path.exists(resource_root)
        lib_dir = os.path.join(SCRIPT_DIR, "data", "prebuilt")
        if os.path.exists(lib_dir):
            assert any([os.path.exists(os.path.join(lib_dir, name)) for name in ["libenv.so", "libenv.dylib", "env.dll"]]), "package is installed, but the prebuilt environment library is missing"
            assert not debug, "debug has no effect for pre-compiled library"
        else:
            # only compile if we don't find a pre-built binary
            lib_dir = build(debug=debug)
        # Key-combination table; a combo's index is its discrete action id.
        self.combos = self.get_combos()
        if render_mode is None:
            render_human = False
        elif render_mode == "rgb_array":
            render_human = True
        else:
            raise Exception(f"invalid render mode {render_mode}")
        if rand_seed is None:
            rand_seed = create_random_seed()
        # Merge the engine-level settings into the caller-provided options;
        # this dict is handed to the C library via CEnv.
        options.update(
            {
                "env_name": env_name,
                "num_levels": num_levels,
                "start_level": start_level,
                "num_actions": len(self.combos),
                "use_sequential_levels": bool(use_sequential_levels),
                "debug_mode": debug_mode,
                "rand_seed": rand_seed,
                "num_threads": num_threads,
                "render_human": render_human,
                # these will only be used the first time an environment is created in a process
                "resource_root": resource_root,
            }
        )
        self.options = options
        super().__init__(
            lib_dir=lib_dir,
            num=num,
            options=options,
            # Extra C entry points (beyond the libenv ABI) used by
            # get_state/set_state below.
            c_func_defs=[
                "int get_state(libenv_env *, int, char *, int);",
                "void set_state(libenv_env *, int, char *, int);",
            ],
        )
        # don't use the dict space for actions
        self.ac_space = self.ac_space["action"]
    def get_state(self):
        """Serialize every sub-environment; returns a list of ``num`` byte strings."""
        length = MAX_STATE_SIZE
        # Single scratch buffer, reused for each sub-environment.
        buf = self._ffi.new(f"char[{length}]")
        result = []
        for env_idx in range(self.num):
            # C call returns the number of bytes actually written.
            n = self.call_c_func("get_state", env_idx, buf, length)
            result.append(bytes(self._ffi.buffer(buf, n)))
        return result
    def set_state(self, states):
        """Restore states previously captured by get_state() (one bytes object per sub-environment)."""
        assert len(states) == self.num
        for env_idx in range(self.num):
            state = states[env_idx]
            self.call_c_func("set_state", env_idx, state, len(state))
    def get_combos(self):
        """Ordered key-combination table defining the discrete action space."""
        return [
            ("LEFT", "DOWN"),
            ("LEFT",),
            ("LEFT", "UP"),
            ("DOWN",),
            (),
            ("UP",),
            ("RIGHT", "DOWN"),
            ("RIGHT",),
            ("RIGHT", "UP"),
            ("D",),
            ("A",),
            ("W",),
            ("S",),
            ("Q",),
            ("E",),
        ]
    def keys_to_act(self, keys_list: Sequence[Sequence[str]]) -> List[Optional[np.ndarray]]:
        """
        Convert list of keys being pressed to actions, used in interactive mode.

        For each environment, picks the largest combo that is fully contained
        in the pressed keys; None when no combo matches.
        """
        result = []
        for keys in keys_list:
            action = None
            max_len = -1
            for i, combo in enumerate(self.get_combos()):
                pressed = True
                for key in combo:
                    if key not in keys:
                        pressed = False
                # Prefer the longest matching combo (most specific action).
                if pressed and (max_len < len(combo)):
                    action = i
                    max_len = len(combo)
            if action is not None:
                action = np.array([action])
            result.append(action)
        return result
    def act(self, ac):
        # tensorflow may return int64 actions (https://github.com/openai/gym/blob/master/gym/spaces/discrete.py#L13)
        # so always cast actions to int32
        return super().act({"action": ac.astype(np.int32)})
class ProcgenGym3Env(BaseProcgenEnv):
    """
    gym3 interface for Procgen.

    Translates the user-facing rendering/level-distribution keyword
    arguments into the options dict expected by BaseProcgenEnv, and
    implements the Python-side 'exploration' distribution mode (a single
    fixed level on top of 'hard').
    """
    def __init__(
        self,
        num,
        env_name,
        center_agent=True,
        use_backgrounds=True,
        use_monochrome_assets=False,
        restrict_themes=False,
        use_generated_assets=False,
        paint_vel_info=False,
        distribution_mode="hard",
        **kwargs,
    ):
        assert (
            distribution_mode in DISTRIBUTION_MODE_DICT
        ), f'"{distribution_mode}" is not a valid distribution mode.'
        if distribution_mode == "exploration":
            # Exploration mode is implemented here rather than in C: run the
            # game in 'hard' mode pinned to one curated level seed.
            assert (
                env_name in EXPLORATION_LEVEL_SEEDS
            ), f"{env_name} does not support exploration mode"
            distribution_mode = DISTRIBUTION_MODE_DICT["hard"]
            assert "num_levels" not in kwargs, "exploration mode overrides num_levels"
            kwargs["num_levels"] = 1
            assert "start_level" not in kwargs, "exploration mode overrides start_level"
            kwargs["start_level"] = EXPLORATION_LEVEL_SEEDS[env_name]
        else:
            # Map the mode name to the integer understood by game.h.
            distribution_mode = DISTRIBUTION_MODE_DICT[distribution_mode]
        options = {
            "center_agent": bool(center_agent),
            "use_generated_assets": bool(use_generated_assets),
            "use_monochrome_assets": bool(use_monochrome_assets),
            "restrict_themes": bool(restrict_themes),
            "use_backgrounds": bool(use_backgrounds),
            "paint_vel_info": bool(paint_vel_info),
            "distribution_mode": distribution_mode,
        }
        # NOTE(review): BaseProcgenEnv.__init__ requires a 'wandb' argument
        # with no default; it must arrive via **kwargs here — confirm callers.
        super().__init__(num, env_name, options, **kwargs)
def ProcgenEnv(num_envs, env_name, **kwargs):
    """Baselines ``VecEnv`` interface for Procgen.

    Thin adapter: builds a ProcgenGym3Env and wraps it with
    ``gym3.ToBaselinesVecEnv``.
    """
    gym3_env = ProcgenGym3Env(num=num_envs, env_name=env_name, **kwargs)
    return gym3.ToBaselinesVecEnv(gym3_env)
acdfe3387cc5b77b7b62cf925a2d08c81c684dbb | 393 | py | Python | 1 TecWeb/181008 Aula 11/venv/projetos/projetos/wsgi.py | nathaliaescarlate/Aulas2S | d034d9ffe97d63ded2df4ecbd00cb5c7b78a0107 | [
"Apache-2.0"
] | null | null | null | 1 TecWeb/181008 Aula 11/venv/projetos/projetos/wsgi.py | nathaliaescarlate/Aulas2S | d034d9ffe97d63ded2df4ecbd00cb5c7b78a0107 | [
"Apache-2.0"
] | 4 | 2020-02-11T23:16:33.000Z | 2021-06-10T20:55:51.000Z | 1 TecWeb/181008 Aula 11/venv/projetos/projetos/wsgi.py | nathaliaescarlate/Aulas2S | d034d9ffe97d63ded2df4ecbd00cb5c7b78a0107 | [
"Apache-2.0"
] | null | null | null | """
WSGI config for projetos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projetos.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
acdfe67fa38ed4cc5c5ed5cd2ce3ce6b26bd8a92 | 19,386 | py | Python | Lib/test/test_named_expression.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 2 | 2021-02-01T14:26:42.000Z | 2021-03-12T21:37:48.000Z | Lib/test/test_named_expression.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | Lib/test/test_named_expression.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | import unittest
GLOBAL_VAR = None
class NamedExpressionInvalidTest(unittest.TestCase):
def test_named_expression_invalid_01(self):
code = """x := 0"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_02(self):
code = """x = y := 0"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_03(self):
code = """y := f(x)"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_04(self):
code = """y0 = y1 := f(x)"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_06(self):
code = """((a, b) := (1, 2))"""
#with self.assertRaisesRegex(SyntaxError, "cannot use assignment expressions with tuple"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_07(self):
code = """def spam(a = b := 42): pass"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_08(self):
code = """def spam(a: b := 42 = 5): pass"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_09(self):
code = """spam(a=b := 'c')"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_10(self):
code = """spam(x = y := f(x))"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_11(self):
code = """spam(a=1, b := 2)"""
#with self.assertRaisesRegex(SyntaxError,
# "positional argument follows keyword argument"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_12(self):
code = """spam(a=1, (b := 2))"""
#with self.assertRaisesRegex(SyntaxError,
# "positional argument follows keyword argument"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_13(self):
code = """spam(a=1, (b := 2))"""
#with self.assertRaisesRegex(SyntaxError,
# "positional argument follows keyword argument"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
def test_named_expression_invalid_14(self):
code = """(x := lambda: y := 1)"""
#with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
with self.assertRaises(SyntaxError):
exec(code, {}, {})
    def test_named_expression_invalid_15(self):
        # An unparenthesized walrus may not be the entire lambda body.
        code = """(lambda: x := 1)"""
        #with self.assertRaisesRegex(SyntaxError,
        #    "cannot use assignment expressions with lambda"): # TODO RustPython
        with self.assertRaises(SyntaxError):
            exec(code, {}, {})
    def test_named_expression_invalid_16(self):
        # A walrus in a comprehension's iterable needs parentheses.
        code = "[i + 1 for i in i := [1,2]]"
        #with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
        with self.assertRaises(SyntaxError):
            exec(code, {}, {})
    def test_named_expression_invalid_17(self):
        # A comprehension element may not be an unparenthesized tuple of walruses.
        code = "[i := 0, j := 1 for i, j in [(1, 2), (3, 4)]]"
        #with self.assertRaisesRegex(SyntaxError, "invalid syntax"): # TODO RustPython
        with self.assertRaises(SyntaxError):
            exec(code, {}, {})
    def test_named_expression_invalid_in_class_body(self):
        # A walrus inside a comprehension cannot target a name in a class body.
        code = """class Foo():
            [(42, 1 + ((( j := i )))) for i in range(5)]
        """
        #with self.assertRaisesRegex(SyntaxError,
        #    "assignment expression within a comprehension cannot be used in a class body"): # TODO RustPython
        with self.assertRaises(SyntaxError):
            exec(code, {}, {})
    def test_named_expression_invalid_rebinding_comprehension_iteration_variable(self):
        # A walrus may never rebind a comprehension's own iteration variable,
        # in any position (element, condition, unpacking, unreachable branch).
        cases = [
            ("Local reuse", 'i', "[i := 0 for i in range(5)]"),
            ("Nested reuse", 'j', "[[(j := 0) for i in range(5)] for j in range(5)]"),
            ("Reuse inner loop target", 'j', "[(j := 0) for i in range(5) for j in range(5)]"),
            ("Unpacking reuse", 'i', "[i := 0 for i, j in [(0, 1)]]"),
            ("Reuse in loop condition", 'i', "[i+1 for i in range(5) if (i := 0)]"),
            ("Unreachable reuse", 'i', "[False or (i:=0) for i in range(5)]"),
            ("Unreachable nested reuse", 'i',
                "[(i, j) for i in range(5) for j in range(5) if True or (i:=10)]"),
        ]
        for case, target, code in cases:
            #msg = f"assignment expression cannot rebind comprehension iteration variable '{target}'"
            with self.subTest(case=case):
                #with self.assertRaisesRegex(SyntaxError, msg): #TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(code, {}, {})
    def test_named_expression_invalid_rebinding_comprehension_inner_loop(self):
        # An inner for-loop may not reuse a name already bound by a walrus
        # in an outer part of the same comprehension.
        cases = [
            ("Inner reuse", 'j', "[i for i in range(5) if (j := 0) for j in range(5)]"),
            ("Inner unpacking reuse", 'j', "[i for i in range(5) if (j := 0) for j, k in [(0, 1)]]"),
        ]
        for case, target, code in cases:
            #msg = f"comprehension inner loop cannot rebind assignment expression target '{target}'"
            with self.subTest(case=case):
                # The error must be raised regardless of the enclosing scope kind.
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(code, {}) # Module scope
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(code, {}, {}) # Class scope
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(f"lambda: {code}", {}) # Function scope
    def test_named_expression_invalid_comprehension_iterable_expression(self):
        # A walrus may not appear anywhere in a comprehension's iterable
        # expression (directly, nested, or in an inner comprehension), except
        # inside a lambda body where it creates its own scope -- that case is
        # listed here too and is expected to fail per CPython behavior.
        cases = [
            ("Top level", "[i for i in (i := range(5))]"),
            ("Inside tuple", "[i for i in (2, 3, i := range(5))]"),
            ("Inside list", "[i for i in [2, 3, i := range(5)]]"),
            ("Different name", "[i for i in (j := range(5))]"),
            ("Lambda expression", "[i for i in (lambda:(j := range(5)))()]"),
            ("Inner loop", "[i for i in range(5) for j in (i := range(5))]"),
            ("Nested comprehension", "[i for i in [j for j in (k := range(5))]]"),
            ("Nested comprehension condition", "[i for i in [j for j in range(5) if (j := True)]]"),
            ("Nested comprehension body", "[i for i in [(j := True) for j in range(5)]]"),
        ]
        #msg = "assignment expression cannot be used in a comprehension iterable expression"
        for case, code in cases:
            with self.subTest(case=case):
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(code, {}) # Module scope
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(code, {}, {}) # Class scope
                #with self.assertRaisesRegex(SyntaxError, msg): # TODO RustPython
                with self.assertRaises(SyntaxError):
                    exec(f"lambda: {code}", {}) # Function scope
class NamedExpressionAssignmentTest(unittest.TestCase):
    """Positive tests: the walrus operator binds the target name in the
    enclosing scope in each syntactic position where it is legal."""
    def test_named_expression_assignment_01(self):
        (a := 10)
        self.assertEqual(a, 10)
    def test_named_expression_assignment_02(self):
        a = 20
        (a := a)
        self.assertEqual(a, 20)
    def test_named_expression_assignment_03(self):
        (total := 1 + 2)
        self.assertEqual(total, 3)
    def test_named_expression_assignment_04(self):
        (info := (1, 2, 3))
        self.assertEqual(info, (1, 2, 3))
    def test_named_expression_assignment_05(self):
        # The walrus binds only the first element of the tuple expression.
        (x := 1, 2)
        self.assertEqual(x, 1)
    def test_named_expression_assignment_06(self):
        # Nested walruses propagate the innermost value outward.
        (z := (y := (x := 0)))
        self.assertEqual(x, 0)
        self.assertEqual(y, 0)
        self.assertEqual(z, 0)
    def test_named_expression_assignment_07(self):
        (loc := (1, 2))
        self.assertEqual(loc, (1, 2))
    def test_named_expression_assignment_08(self):
        if spam := "eggs":
            self.assertEqual(spam, "eggs")
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_09(self):
        if True and (spam := True):
            self.assertTrue(spam)
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_10(self):
        if (match := 10) == 10:
            pass
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_11(self):
        def spam(a):
            return a
        input_data = [1, 2, 3]
        res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
        self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
    def test_named_expression_assignment_12(self):
        def spam(a):
            return a
        res = [[y := spam(x), x/y] for x in range(1, 5)]
        self.assertEqual(res, [[1, 1.0], [2, 1.0], [3, 1.0], [4, 1.0]])
    def test_named_expression_assignment_13(self):
        length = len(lines := [1, 2])
        self.assertEqual(length, 2)
        self.assertEqual(lines, [1,2])
    def test_named_expression_assignment_14(self):
        """
        Where all variables are positive integers, and a is at least as large
        as the n'th root of x, this algorithm returns the floor of the n'th
        root of x (and roughly doubling the number of accurate bits per
        iteration):
        """
        a = 9
        n = 2
        x = 3
        while a > (d := x // a**(n-1)):
            a = ((n-1)*a + d) // n
        self.assertEqual(a, 1)
    def test_named_expression_assignment_15(self):
        # The binding happens even though the loop body never executes.
        while a := False:
            pass  # This will not run
        self.assertEqual(a, False)
    def test_named_expression_assignment_16(self):
        a, b = 1, 2
        fib = {(c := a): (a := b) + (b := a + c) - b for __ in range(6)}
        self.assertEqual(fib, {1: 2, 2: 3, 3: 5, 5: 8, 8: 13, 13: 21})
class NamedExpressionScopeTest(unittest.TestCase):
    """Scope tests: a walrus inside a comprehension binds in the scope
    *containing* the comprehension (PEP 572), and it respects explicit
    global/nonlocal declarations."""
    def test_named_expression_scope_01(self):
        code = """def spam():
            (a := 5)
        print(a)"""
        with self.assertRaisesRegex(NameError, "name 'a' is not defined"):
            exec(code, {}, {})
    def test_named_expression_scope_02(self):
        total = 0
        partial_sums = [total := total + v for v in range(5)]
        self.assertEqual(partial_sums, [0, 1, 3, 6, 10])
        self.assertEqual(total, 10)
    def test_named_expression_scope_03(self):
        containsOne = any((lastNum := num) == 1 for num in [1, 2, 3])
        self.assertTrue(containsOne)
        # The generator short-circuits, so lastNum keeps the first match.
        self.assertEqual(lastNum, 1)
    def test_named_expression_scope_04(self):
        def spam(a):
            return a
        res = [[y := spam(x), x/y] for x in range(1, 5)]
        self.assertEqual(y, 4)
    def test_named_expression_scope_05(self):
        def spam(a):
            return a
        input_data = [1, 2, 3]
        res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
        self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
        self.assertEqual(y, 3)
    def test_named_expression_scope_06(self):
        res = [[spam := i for i in range(3)] for j in range(2)]
        self.assertEqual(res, [[0, 1, 2], [0, 1, 2]])
        self.assertEqual(spam, 2)
    # modified version of test_named_expression_scope_6, where locals
    # assigned before to make them known in scop. THis is required due
    # to some shortcommings in RPs name handling.
    def test_named_expression_scope_06_rp_modified(self):
        spam=0
        res = [[spam := i for i in range(3)] for j in range(2)]
        self.assertEqual(res, [[0, 1, 2], [0, 1, 2]])
        self.assertEqual(spam, 2)
    def test_named_expression_scope_07(self):
        len(lines := [1, 2])
        self.assertEqual(lines, [1, 2])
    def test_named_expression_scope_08(self):
        def spam(a):
            return a
        def eggs(b):
            return b * 2
        res = [spam(a := eggs(b := h)) for h in range(2)]
        self.assertEqual(res, [0, 2])
        self.assertEqual(a, 2)
        self.assertEqual(b, 1)
    def test_named_expression_scope_09(self):
        def spam(a):
            return a
        def eggs(b):
            return b * 2
        res = [spam(a := eggs(a := h)) for h in range(2)]
        self.assertEqual(res, [0, 2])
        self.assertEqual(a, 2)
    def test_named_expression_scope_10(self):
        res = [b := [a := 1 for i in range(2)] for j in range(2)]
        self.assertEqual(res, [[1, 1], [1, 1]])
        self.assertEqual(a, 1)
        self.assertEqual(b, [1, 1])
    # modified version of test_named_expression_scope_10, where locals
    # assigned before to make them known in scop. THis is required due
    # to some shortcommings in RPs name handling.
    def test_named_expression_scope_10_rp_modified(self):
        a=0
        b=0
        res = [b := [a := 1 for i in range(2)] for j in range(2)]
        self.assertEqual(res, [[1, 1], [1, 1]])
        self.assertEqual(b, [1, 1])
        self.assertEqual(a, 1)
    def test_named_expression_scope_11(self):
        res = [j := i for i in range(5)]
        self.assertEqual(res, [0, 1, 2, 3, 4])
        self.assertEqual(j, 4)
    def test_named_expression_scope_17(self):
        b = 0
        res = [b := i + b for i in range(5)]
        self.assertEqual(res, [0, 1, 3, 6, 10])
        self.assertEqual(b, 10)
    def test_named_expression_scope_18(self):
        def spam(a):
            return a
        res = spam(b := 2)
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_19(self):
        def spam(a):
            return a
        res = spam((b := 2))
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_20(self):
        def spam(a):
            return a
        res = spam(a=(b := 2))
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_21(self):
        def spam(a, b):
            return a + b
        res = spam(c := 2, b=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_22(self):
        def spam(a, b):
            return a + b
        res = spam((c := 2), b=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_23(self):
        def spam(a, b):
            return a + b
        res = spam(b=(c := 2), a=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_24(self):
        a = 10
        def spam():
            nonlocal a
            (a := 20)
        spam()
        self.assertEqual(a, 20)
    def test_named_expression_scope_25(self):
        ns = {}
        code = """a = 10
def spam():
    global a
    (a := 20)
spam()"""
        exec(code, ns, {})
        self.assertEqual(ns["a"], 20)
    def test_named_expression_variable_reuse_in_comprehensions(self):
        # The compiler is expected to raise syntax error for comprehension
        # iteration variables, but should be fine with rebinding of other
        # names (e.g. globals, nonlocals, other assignment expressions)
        # The cases are all defined to produce the same expected result
        # Each comprehension is checked at both function scope and module scope
        rebinding = "[x := i for i in range(3) if (x := i) or not x]"
        filter_ref = "[x := i for i in range(3) if x or not x]"
        body_ref = "[x for i in range(3) if (x := i) or not x]"
        nested_ref = "[j for i in range(3) if x or not x for j in range(3) if (x := i)][:-3]"
        cases = [
            ("Rebind global", f"x = 1; result = {rebinding}"),
            ("Rebind nonlocal", f"result, x = (lambda x=1: ({rebinding}, x))()"),
            ("Filter global", f"x = 1; result = {filter_ref}"),
            ("Filter nonlocal", f"result, x = (lambda x=1: ({filter_ref}, x))()"),
            ("Body global", f"x = 1; result = {body_ref}"),
            ("Body nonlocal", f"result, x = (lambda x=1: ({body_ref}, x))()"),
            ("Nested global", f"x = 1; result = {nested_ref}"),
            ("Nested nonlocal", f"result, x = (lambda x=1: ({nested_ref}, x))()"),
        ]
        for case, code in cases:
            with self.subTest(case=case):
                ns = {}
                exec(code, ns)
                self.assertEqual(ns["x"], 2)
                self.assertEqual(ns["result"], [0, 1, 2])
    def test_named_expression_global_scope(self):
        sentinel = object()
        global GLOBAL_VAR
        def f():
            global GLOBAL_VAR
            [GLOBAL_VAR := sentinel for _ in range(1)]
            self.assertEqual(GLOBAL_VAR, sentinel)
        try:
            f()
            self.assertEqual(GLOBAL_VAR, sentinel)
        finally:
            GLOBAL_VAR = None
    def test_named_expression_global_scope_no_global_keyword(self):
        sentinel = object()
        def f():
            GLOBAL_VAR = None
            [GLOBAL_VAR := sentinel for _ in range(1)]
            self.assertEqual(GLOBAL_VAR, sentinel)
        f()
        # Without `global`, the walrus bound the function-local name only.
        self.assertEqual(GLOBAL_VAR, None)
    def test_named_expression_nonlocal_scope(self):
        sentinel = object()
        def f():
            nonlocal_var = None
            def g():
                nonlocal nonlocal_var
                [nonlocal_var := sentinel for _ in range(1)]
            g()
            self.assertEqual(nonlocal_var, sentinel)
        f()
    def test_named_expression_nonlocal_scope_no_nonlocal_keyword(self):
        sentinel = object()
        def f():
            nonlocal_var = None
            def g():
                [nonlocal_var := sentinel for _ in range(1)]
            g()
            # Without `nonlocal`, g() bound its own local; f's stays None.
            self.assertEqual(nonlocal_var, None)
        f()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 34.250883 | 116 | 0.565305 |
acdfe73e2cbd1c3885bc41b85f3275cfe812e753 | 441 | py | Python | 0x0F-python-object_relational_mapping/model_state.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | 0x0F-python-object_relational_mapping/model_state.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | 0x0F-python-object_relational_mapping/model_state.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
link the class with the database
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
class State(Base):
    """State class.

    SQLAlchemy ORM mapping for the `states` table.
    """
    __tablename__ = 'states'
    # Auto-incrementing integer primary key.
    id = Column(Integer,
                primary_key=True,
                nullable=False,
                autoincrement="auto")
    # State name, up to 128 characters, required.
    name = Column(String(128), nullable=False)
| 21 | 55 | 0.643991 |
acdfe7a643da7451f092e2ffed98d1fc5e32983f | 853 | py | Python | src/sparsezoo/__init__.py | signalism/sparsezoo | 5ca44f8cb514e80844034920d743baba97279ec2 | [
"Apache-2.0"
] | 1 | 2021-07-04T07:29:50.000Z | 2021-07-04T07:29:50.000Z | src/sparsezoo/__init__.py | PIlotcnc/new | 6e6413632de01f6acf691dca8fadb84f841444b9 | [
"Apache-2.0"
] | null | null | null | src/sparsezoo/__init__.py | PIlotcnc/new | 6e6413632de01f6acf691dca8fadb84f841444b9 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functionality for accessing models, recipes, and supporting files in the SparseZoo
"""
# flake8: noqa
# isort: skip_file
from .version import *
from .main import *
from .models.zoo import *
from .objects import *
| 30.464286 | 82 | 0.749121 |
acdfe7bb25c6f1e01430a17f9ef15af6cf8f84d0 | 2,315 | py | Python | comptools/tests/test_io.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | null | null | null | comptools/tests/test_io.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 7 | 2017-08-29T16:20:04.000Z | 2018-06-12T16:58:36.000Z | comptools/tests/test_io.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 1 | 2018-04-03T20:56:40.000Z | 2018-04-03T20:56:40.000Z |
from __future__ import division
import sys
import pytest
import numpy as np
import pandas as pd
from comptools.io import load_sim, load_data
@pytest.mark.needs_data
def test_load_sim_test_size():
    """The train/test split honors the requested test_size fraction."""
    test_size = 0.4
    df_train, df_test = load_sim(test_size=test_size, energy_reco=False,
                                 log_energy_min=None, log_energy_max=None)
    n_train = len(df_train)
    n_test = len(df_test)
    # Fraction of rows in the test set should match test_size to ~1%.
    np.testing.assert_allclose(n_test / (n_test + n_train), test_size,
                               rtol=1e-2)
@pytest.mark.needs_data
def test_load_sim_log_energy_min():
    """Rows below log_energy_min are cut from the loaded DataFrame."""
    log_energy_min = 7.5
    df = load_sim(test_size=0, energy_reco=False,
                  energy_cut_key='MC_log_energy',
                  log_energy_min=log_energy_min, log_energy_max=None)
    np.testing.assert_allclose(log_energy_min, df['MC_log_energy'].min(),
                               rtol=1e-2)
@pytest.mark.needs_data
def test_load_sim_log_energy_max():
    """Rows above log_energy_max are cut from the loaded DataFrame."""
    log_energy_max = 7.5
    df = load_sim(test_size=0, energy_reco=False,
                  energy_cut_key='MC_log_energy',
                  log_energy_min=None, log_energy_max=log_energy_max)
    np.testing.assert_allclose(log_energy_max, df['MC_log_energy'].max(),
                               rtol=1e-2)
@pytest.mark.needs_data
@pytest.mark.parametrize('energy_reco', [True, False])
def test_load_sim_energy_reco(energy_reco):
    """The reco_log_energy column exists iff energy_reco is requested."""
    df = load_sim(test_size=0, energy_reco=energy_reco,
                  log_energy_min=None, log_energy_max=None)
    assert ('reco_log_energy' in df.columns) == energy_reco
@pytest.mark.needs_data
def test_load_sim_split():
    """Two identical load_sim calls yield the same deterministic split."""
    df_train_0, df_test_0 = load_sim(config='IC86.2012',
                                     energy_reco=False,
                                     log_energy_min=None,
                                     log_energy_max=None,
                                     test_size=0.5)
    df_train_1, df_test_1 = load_sim(config='IC86.2012',
                                     energy_reco=False,
                                     log_energy_min=None,
                                     log_energy_max=None,
                                     test_size=0.5)
    pd.testing.assert_frame_equal(df_train_0, df_train_1)
    pd.testing.assert_frame_equal(df_test_0, df_test_1)
| 34.044118 | 74 | 0.605616 |
acdfe80d66b616121c44cbe6021fba7c59eb7473 | 338 | py | Python | 2013_06_19/fizzbuzz.py | samisafatli/dojo | 3751f8413e70da84e928e037193e8cb03f6b3e65 | [
"MIT"
] | 114 | 2015-03-10T22:17:42.000Z | 2022-03-09T17:49:48.000Z | 2013_06_19/fizzbuzz.py | samisafatli/dojo | 3751f8413e70da84e928e037193e8cb03f6b3e65 | [
"MIT"
] | 9 | 2018-09-04T12:49:59.000Z | 2019-11-17T21:29:51.000Z | 2013_06_19/fizzbuzz.py | samisafatli/dojo | 3751f8413e70da84e928e037193e8cb03f6b3e65 | [
"MIT"
] | 39 | 2015-01-29T01:20:56.000Z | 2022-02-17T16:26:25.000Z |
def fizzbuzz(numero):
    """Return 'FizzBuzz', 'Fizz', 'Buzz', or the number itself.

    'Fizz' for multiples of 3, 'Buzz' for multiples of 5, 'FizzBuzz'
    for multiples of both; any other number is returned unchanged.
    """
    multiplo_de_3 = numero % 3 == 0
    multiplo_de_5 = numero % 5 == 0
    if multiplo_de_3 and multiplo_de_5:
        return 'FizzBuzz'
    elif multiplo_de_3:
        return 'Fizz'
    elif multiplo_de_5:
        return 'Buzz'
    else:
        return numero
def fizzbuzz_lista(lista):
    """Return a new list with fizzbuzz applied to every element of lista.

    Idiomatic rewrite: a list comprehension replaces the manual
    build-and-append loop; behavior is unchanged (empty input -> []).
    """
    return [fizzbuzz(numero) for numero in lista]
acdfe964536341100b75a67ac941cc4a489a7d8c | 1,128 | py | Python | dex/util.py | ardunn/dion | db1d2ae93d6b729a8c066e200b0e2e6e0dd0ff21 | [
"BSD-2-Clause"
] | 15 | 2020-08-23T19:41:49.000Z | 2021-05-24T00:03:33.000Z | dex/util.py | ardunn/dex | db1d2ae93d6b729a8c066e200b0e2e6e0dd0ff21 | [
"BSD-2-Clause"
] | 46 | 2020-07-29T04:59:55.000Z | 2021-12-10T08:42:43.000Z | dex/util.py | ardunn/dionysus | db1d2ae93d6b729a8c066e200b0e2e6e0dd0ff21 | [
"BSD-2-Clause"
] | 2 | 2020-08-24T17:26:22.000Z | 2021-07-31T17:04:43.000Z | import os
def initiate_editor(path):
    """Open *path* in the user's $EDITOR by spawning a shell command.

    NOTE(review): *path* is interpolated directly into a shell string, so a
    path containing a double quote or shell metacharacters can break out of
    the quoting (command injection). Consider subprocess + shlex.quote.
    """
    os.system(f"$EDITOR \"{path}\"")
class AttrDict(dict):
    """Syntax candy: a dict whose items are also reachable as attributes."""

    def __getattr__(self, key):
        # Mirrors dict.__getitem__: missing keys raise KeyError.
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
class TerminalStyle:
    """
    Styling of the terminal fonts.

    Exposes the raw ANSI escape codes as attributes and a single-letter
    lookup table (``colormap``) used by :meth:`f` to colorize strings.
    """
    # ANSI escape sequences (values must stay byte-for-byte stable).
    black = '\033[30m'
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[33m'
    blue = '\033[34m'
    magenta = '\033[35m'
    cyan = '\033[36m'
    white = '\033[37m'
    underline = '\033[4m'
    reset = '\033[0m'
    colormap = {
        "k": black,
        "r": red,
        "y": yellow,
        "g": green,
        "b": blue,
        "m": magenta,
        "c": cyan,
        "w": white,
        "u": underline,
        "x": reset,
    }

    def f(self, color: str, string_to_format: str) -> str:
        """
        Format a string with color.

        Args:
            color (str): Letter code for the color
            string_to_format (str): The string to colorize

        Returns:
            Colorized string, terminated with a reset code.
        """
        prefix = self.colormap[color]
        suffix = self.colormap["x"]
        return f"{prefix}{string_to_format}{suffix}"
acdfecfd325723c20b441657ce2372cf10fa29d2 | 7,000 | py | Python | basic_block_gp/blockchain.py | cocoitali/Blockchain | cec710fd714616a3e3459ef515da7b18413cb931 | [
"MIT"
] | null | null | null | basic_block_gp/blockchain.py | cocoitali/Blockchain | cec710fd714616a3e3459ef515da7b18413cb931 | [
"MIT"
] | null | null | null | basic_block_gp/blockchain.py | cocoitali/Blockchain | cec710fd714616a3e3459ef515da7b18413cb931 | [
"MIT"
] | null | null | null | import hashlib
import json
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request
class Blockchain(object):
    """An append-only chain of blocks.

    Each block carries the transactions queued since the previous block, a
    proof-of-work value, and the SHA-256 hash of its predecessor.
    """

    def __init__(self):
        self.chain = []  # chain of mined blocks
        # Transactions queuing up to be part of the next block that gets
        # added to the end of the chain.
        self.current_transactions = []
        self.nodes = set()  # actors/peer nodes
        # Create the genesis block so the chain always has a tip.
        self.new_block(previous_hash=1, proof=100)

    def new_block(self, proof, previous_hash=None):
        """
        Create a new Block in the Blockchain

        :param proof: <int> The proof given by the Proof of Work algorithm
        :param previous_hash: (Optional) <str> Hash of previous Block
        :return: <dict> New Block
        """
        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.current_transactions,
            # Fall back to hashing the current chain tip when the caller
            # does not provide the previous hash explicitly.
            'previous_hash': previous_hash or self.hash(self.chain[-1]),
            'proof': proof,
        }
        # Reset the current list of transactions
        self.current_transactions = []
        self.chain.append(block)  # add the block to the chain
        return block

    def new_transaction(self, sender, recipient, amount):
        """
        Creates a new transaction to go into the next mined Block

        :param sender: <str> Address of the Sender
        :param recipient: <str> Address of the Recipient
        :param amount: <int> Amount
        :return: <int> The index of the Block that will hold this transaction
        """
        self.current_transactions.append({
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
        })
        # The transaction lands in the block after the current tip.
        return self.last_block['index'] + 1

    @staticmethod
    def hash(block):
        """
        Creates a SHA-256 hash of a Block

        :param block: <dict> Block
        :return: <str> hex digest of the block's canonical JSON
        """
        # sort_keys makes the JSON canonical so equal blocks always
        # produce equal hashes; .encode() yields the bytes sha256 needs.
        block_string = json.dumps(block, sort_keys=True).encode()
        # .hexdigest() renders the raw digest as a readable hex string.
        return hashlib.sha256(block_string).hexdigest()

    @property
    def last_block(self):
        """The most recently added block (the chain tip)."""
        return self.chain[-1]

    def proof_of_work(self):
        """
        Simple Proof of Work Algorithm
        - Find a number p' such that hash(pp') contains 6 leading
        zeroes, where p is the previous p'
        - p is the previous proof, and p' is the new proof
        """
        block_string = json.dumps(self.last_block, sort_keys=True)
        proof = 0
        # `not valid_proof(...)` instead of `is False`: the identity test
        # silently mishandled any non-bool falsy return.
        while not self.valid_proof(block_string, proof):
            proof += 1
        return proof

    @staticmethod
    def valid_proof(block_string, proof):
        """
        Validates the Proof: Does hash(block_string, proof) contain 6
        leading zeroes?

        :return: <bool> True iff the hash starts with '000000'
        """
        guess = f'{block_string}{proof}'.encode()
        guess_hash = hashlib.sha256(guess).hexdigest()
        # BUG FIX: the comparison result was computed but never returned,
        # so this method always returned None and proof_of_work's loop
        # terminated immediately with an invalid proof of 0.
        return guess_hash[:6] == '000000'

    def valid_chain(self, chain):
        """Determine if a given blockchain is valid

        :param chain: <list> A blockchain
        :return: <bool> True if valid, False if not
        """
        last_block = chain[0]
        current_index = 1
        while current_index < len(chain):
            block = chain[current_index]
            print(f'{last_block}')
            print(f'{block}')
            print("\n-------------------\n")
            # Check that the hash of the block is correct
            if self.hash(last_block) != block['previous_hash']:
                return False
            # Proofs are mined against the *previous* block's canonical JSON.
            block_string = json.dumps(last_block, sort_keys=True)
            # Check that the Proof of Work is correct
            if not self.valid_proof(block_string, block['proof']):
                return False
            last_block = block
            current_index += 1
        return True
# Module-level singletons shared by all Flask route handlers below.
# Instantiate our Node
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
    """Mine one block: run proof-of-work, reward this node, forge the block."""
    # We run the proof of work algorithm to get the next proof...
    proof = blockchain.proof_of_work()

    # We must receive a reward for finding the proof.
    # The sender is "0" to signify that this node has mined a new coin
    # The recipient is the current node, it did the mining!
    # The amount is 1 coin as a reward for mining the next block
    # NOTE(review): sender is passed as int 0 here while new_transaction's
    # docstring documents a <str> address -- confirm which is intended.
    blockchain.new_transaction(0, node_identifier, 1 )

    # Forge the new Block by adding it to the chain
    previous_hash = blockchain.hash(blockchain.last_block)
    block = blockchain.new_block(proof, previous_hash)

    # Send a response with the new block
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'transactions': block['transactions'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
    """Queue a transaction from the POSTed JSON body for the next block."""
    values = request.get_json()

    # Check that the required fields are in the POST'ed data
    required = ['sender', 'recipient', 'amount']
    if not all(k in values for k in required):
        return 'Missing Values', 400

    # Create a new Transaction
    index = blockchain.new_transaction(values['sender'],
                                       values['recipient'],
                                       values['amount'])

    response = {'message': f'Transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def last_proof():
    """Return the full chain and its length.

    NOTE(review): the function name `last_proof` does not match what the
    /chain endpoint returns; renaming would change the Flask endpoint name,
    so it is only flagged here.
    """
    response = {
        'currentChain' : blockchain.chain,
        'length' : len(blockchain.chain)
    }
    return jsonify(response), 200
@app.route('/valid-chain', methods=['GET'])
def valid_chain():
    """Report whether this node's own chain passes validation."""
    response = {
        'valid-chain': blockchain.valid_chain(blockchain.chain)
    }
    return jsonify(response), 200
# Run the program on port 5000
if __name__ == '__main__':
    # 0.0.0.0 binds on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0', port=5000, debug = True)
| 31.390135 | 123 | 0.609571 |
acdfed3708d2acf1ed91662cc3996dcd4b50a32e | 1,889 | py | Python | importutil.py | moin18/importutil | f3eaef088cd04bf6010bf5567252b977ad73e9ec | [
"MIT"
] | null | null | null | importutil.py | moin18/importutil | f3eaef088cd04bf6010bf5567252b977ad73e9ec | [
"MIT"
] | null | null | null | importutil.py | moin18/importutil | f3eaef088cd04bf6010bf5567252b977ad73e9ec | [
"MIT"
] | null | null | null | import sys
def delete_module(modname):
    """
    Delete a module and its sub-modules from `sys.modules`.

    :param modname: dotted module name to remove.
    :raises ValueError: if `modname` is not currently in `sys.modules`.
    """
    if modname not in sys.modules:
        raise ValueError("Module not found in sys.modules: '{}'".format(modname))

    # BUG FIX: the original used module.startswith(modname), which also
    # deleted unrelated modules that merely share a name prefix (deleting
    # 'foo' would also remove 'foobar'). Match the module itself or a true
    # dotted submodule only.
    prefix = modname + '.'
    for module in list(sys.modules.keys()):
        if module == modname or module.startswith(prefix):
            del sys.modules[module]
def reload_module(module):
    """
    Reload the Python module across Python versions.

    :param module: an already-imported module object.
    """
    # BUG FIX: the original caught (ImportError, NameError) and then ran
    # `import imp` *inside* that handler. An exception raised inside an
    # except block is not caught by the following except clauses of the
    # same try, so on Python >= 3.12 (where `imp` was removed) the call
    # crashed with an uncaught ImportError; the trailing duplicate except
    # clauses were dead code.
    try:
        # Python 2.x: `reload` is a builtin.
        reload(module)
    except NameError:
        try:
            # Python >= 3.4
            from importlib import reload as _reload
        except ImportError:
            # Python 3.0 - 3.3
            from imp import reload as _reload
        _reload(module)
def lazy_load_modules(*modules):
    """
    Decorator to load module to perform related operation for specific function
    and delete the module from imports once the task is done. GC frees the memory
    related to module during clean-up.

    :param modules: dotted module names to import for the duration of the call.
    """
    def decorator(function):
        # Wraps `function`: import requested modules, call, then unload them.
        def wrapper(*args, **kwargs):
            module_dict = {}
            for module_string in modules:
                # __import__ returns the top-level package for dotted names.
                module = __import__(module_string)
                # Add `module` entry in `sys.modules`. After deleting the module
                # from `sys.modules` and re-importing the module don't update
                # the module entry in `sys.modules` dict
                # NOTE(review): keys on module.__package__, which can be ''
                # or differ from module_string -- verify for dotted imports.
                sys.modules[module.__package__] = module
                reload_module(module)
                module_dict[module_string] = module
            func_response = function(*args, **kwargs)
            for module_string, module in module_dict.items():
                # delete idna module
                delete_module(module_string)
                del module # delete reference to idna
            # Modules are gone from sys.modules; GC can reclaim them now.
            return func_response
        return wrapper
    return decorator
| 29.061538 | 82 | 0.597141 |
acdfed6f2dd52affa1f27fe269e794982a552963 | 20,507 | py | Python | ios/build/bots/scripts/xcode_log_parser_test.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/build/bots/scripts/xcode_log_parser_test.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/build/bots/scripts/xcode_log_parser_test.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-03-07T14:20:02.000Z | 2021-03-07T14:20:02.000Z | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for xcode_log_parser.py."""
import json
import mock
import os
import unittest
import test_runner
import test_runner_test
import xcode_log_parser
# Paths used as canned inputs by the tests below.
OUTPUT_PATH = '/tmp/attempt_0'
XCRESULT_PATH = '/tmp/attempt_0.xcresult'
# Canned Xcode install info; setUp mocks test_runner.get_current_xcode_info
# to return this dict.
XCODE11_DICT = {
    'path': '/Users/user1/Xcode.app',
    'version': '11.0',
    'build': '11M336w',
}
# A sample of json result when executing xcresulttool on .xcresult dir without
# --id. Some unused keys and values were removed.
XCRESULT_ROOT = """
{
"_type" : {
"_name" : "ActionsInvocationRecord"
},
"actions" : {
"_values" : [
{
"actionResult" : {
"_type" : {
"_name" : "ActionResult"
},
"diagnosticsRef" : {
"id" : {
"_value" : "DIAGNOSTICS_REF_ID"
}
},
"logRef" : {
"id" : {
"_value" : "0~6jr1GkZxoWVzWfcUNA5feff3l7g8fPHJ1rqKetCBa3QXhCGY74PnEuRwzktleMTFounMfCdDpSr1hRfhUGIUEQ=="
}
},
"testsRef" : {
"id" : {
"_value" : "0~iRbOkDnmtKVIvHSV2jkeuNcg4RDTUaCLZV7KijyxdCqvhqtp08MKxl0MwjBAPpjmruoI7qNHzBR1RJQAlANNHA=="
}
}
}
}
]
},
"issues" : {
"testFailureSummaries" : {
"_values" : [
{
"documentLocationInCreatingWorkspace" : {
"url" : {
"_value" : "file:\/\/\/..\/..\/ios\/web\/shell\/test\/page_state_egtest.mm#CharacterRangeLen=0&EndingLineNumber=130&StartingLineNumber=130"
}
},
"message" : {
"_value": "Fail. Screenshots: {\\n\\"Failure\\": \\"path.png\\"\\n}"
},
"testCaseName" : {
"_value": "-[PageStateTestCase testZeroContentOffsetAfterLoad]"
}
}
]
}
},
"metrics" : {
"testsCount" : {
"_value" : "2"
},
"testsFailedCount" : {
"_value" : "1"
}
}
}"""
REF_ID = """
{
"actions": {
"_values": [{
"actionResult": {
"testsRef": {
"id": {
"_value": "REF_ID"
}
}
}
}]
}
}"""
# A sample of json result when executing xcresulttool on .xcresult dir with
# "testsRef" as --id input. Some unused keys and values were removed.
TESTS_REF = """
{
"summaries": {
"_values": [{
"testableSummaries": {
"_type": {
"_name": "Array"
},
"_values": [{
"tests": {
"_type": {
"_name": "Array"
},
"_values": [{
"identifier" : {
"_value" : "All tests"
},
"name" : {
"_value" : "All tests"
},
"subtests": {
"_values": [{
"identifier" : {
"_value" : "ios_web_shell_eg2tests_module.xctest"
},
"name" : {
"_value" : "ios_web_shell_eg2tests_module.xctest"
},
"subtests": {
"_values": [{
"identifier" : {
"_value" : "PageStateTestCase"
},
"name" : {
"_value" : "PageStateTestCase"
},
"subtests": {
"_values": [{
"testStatus": {
"_value": "Success"
},
"identifier": {
"_value": "PageStateTestCase/testMethod1"
},
"name": {
"_value": "testMethod1"
}
},
{
"summaryRef": {
"id": {
"_value": "0~7Q_uAuUSJtx9gtHM08psXFm3g_xiTTg5bpdoDO88nMXo_iMwQTXpqlrlMe5AtkYmnZ7Ux5uEgAe83kJBfoIckw=="
}
},
"testStatus": {
"_value": "Failure"
},
"identifier": {
"_value": "PageStateTestCase\/testZeroContentOffsetAfterLoad"
},
"name": {
"_value": "testZeroContentOffsetAfterLoad"
}
},
{
"testStatus": {
"_value": "Success"
},
"identifier": {
"_value": "PageStateTestCase/testMethod2"
},
"name": {
"_value": "testMethod2"
}
}]
}
}]
}
}]
}
}]
}
}]
}
}]
}
}
"""
# A sample of json result when executing xcresulttool on .xcresult dir with
# a single test summaryRef id value as --id input. Some unused keys and values
# were removed.
SINGLE_TEST_SUMMARY_REF = """
{
"_type" : {
"_name" : "ActionTestSummary",
"_supertype" : {
"_name" : "ActionTestSummaryIdentifiableObject",
"_supertype" : {
"_name" : "ActionAbstractTestSummary"
}
}
},
"activitySummaries" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_25659115-F3E4-47AE-AA34-551C94333D7E.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_1"
}
}
}
]
},
"title" : {
"_value" : "Start Test at 2020-10-19 14:12:58.111"
}
},
{
"subactivities" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_23D95D0E-8B97-4F99-BE3C-A46EDE5999D7.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_2"
}
}
}
]
},
"subactivities" : {
"_values" : [
{
"subactivities" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Crash_3F0A2B1C-7ADA-436E-A54C-D4C39B8411F8.crash"
},
"payloadRef" : {
"id" : {
"_value" : "CRASH_REF_ID_IN_ACTIVITY_SUMMARIES"
}
}
}
]
},
"title" : {
"_value" : "Wait for org.chromium.ios-web-shell-eg2tests to idle"
}
}
]
},
"title" : {
"_value" : "Activate org.chromium.ios-web-shell-eg2tests"
}
}
]
},
"title" : {
"_value" : "Open org.chromium.ios-web-shell-eg2tests"
}
}
]
},
"title" : {
"_value" : "Set Up"
}
},
{
"title" : {
"_value" : "Find the Target Application 'org.chromium.ios-web-shell-eg2tests'"
}
},
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "Screenshot_278BA84B-2196-4CCD-9D31-2C07DDDC9DFC.jpg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_3"
}
}
}
]
},
"title" : {
"_value" : "Uncaught Exception at page_state_egtest.mm:131: \\nCannot scroll, the..."
}
},
{
"title" : {
"_value" : "Uncaught Exception: Immediately halt execution of testcase (EarlGreyInternalTestInterruptException)"
}
},
{
"title" : {
"_value" : "Tear Down"
}
}
]
},
"failureSummaries" : {
"_values" : [
{
"attachments" : {
"_values" : [
{
"filename" : {
"_value" : "kXCTAttachmentLegacyScreenImageData_1_6CED1FE5-96CA-47EA-9852-6FADED687262.jpeg"
},
"payloadRef" : {
"id" : {
"_value" : "SCREENSHOT_REF_ID_IN_FAILURE_SUMMARIES"
}
}
}
]
},
"fileName" : {
"_value" : "\/..\/..\/ios\/web\/shell\/test\/page_state_egtest.mm"
},
"lineNumber" : {
"_value" : "131"
},
"message" : {
"_value" : "Some logs."
}
},
{
"message" : {
"_value" : "Immediately halt execution of testcase (EarlGreyInternalTestInterruptException)"
}
}
]
},
"identifier" : {
"_value" : "PageStateTestCase\/testZeroContentOffsetAfterLoad"
},
"name" : {
"_value" : "testZeroContentOffsetAfterLoad"
},
"testStatus" : {
"_value" : "Failure"
}
}"""
def _xcresulttool_get_side_effect(xcresult_path, ref_id=None):
  """Fake for Xcode11LogParser._xcresulttool_get used by the tests below.

  Returns a canned JSON payload matching the requested reference id.
  The |xcresult_path| argument is accepted for signature compatibility
  but ignored.
  """
  # No ref id means the caller asked for the root of the result bundle.
  if ref_id is None:
    return XCRESULT_ROOT
  # Apart from the root, the parser only asks for the tests reference or
  # for a single test's summary reference.
  return TESTS_REF if ref_id == 'testsRef' else SINGLE_TEST_SUMMARY_REF
class XCode11LogParserTest(test_runner_test.TestCase):
  """Test case to test Xcode11LogParser."""

  def setUp(self):
    super(XCode11LogParserTest, self).setUp()
    # All tests run against a fake Xcode 11 installation description.
    self.mock(test_runner, 'get_current_xcode_info', lambda: XCODE11_DICT)

  @mock.patch('xcode_util.version', autospec=True)
  def testGetParser(self, mock_xcode_version):
    """Xcode >= 11 selects Xcode11LogParser; older versions the legacy one."""
    mock_xcode_version.return_value = ('12.0', '12A7209')
    self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'Xcode11LogParser')
    mock_xcode_version.return_value = ('11.4', '11E146')
    self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'Xcode11LogParser')
    mock_xcode_version.return_value = ('10.3', '10G8')
    self.assertEqual(xcode_log_parser.get_parser().__class__.__name__, 'XcodeLogParser')

  @mock.patch('subprocess.check_output', autospec=True)
  def testXcresulttoolGetRoot(self, mock_process):
    """_xcresulttool_get without a ref id fetches the bundle root."""
    mock_process.return_value = '%JSON%'
    xcode_log_parser.Xcode11LogParser()._xcresulttool_get('xcresult_path')
    # The Xcode bin dir must be prepended to PATH so xcresulttool resolves.
    self.assertTrue(
        os.path.join(XCODE11_DICT['path'], 'usr', 'bin') in os.environ['PATH'])
    self.assertEqual(
        ['xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path'],
        mock_process.mock_calls[0][1][0])

  @mock.patch('subprocess.check_output', autospec=True)
  def testXcresulttoolGetRef(self, mock_process):
    """_xcresulttool_get with a ref id does a root fetch then an --id fetch."""
    mock_process.side_effect = [REF_ID, 'JSON']
    xcode_log_parser.Xcode11LogParser()._xcresulttool_get('xcresult_path',
                                                          'testsRef')
    self.assertEqual(
        ['xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path'],
        mock_process.mock_calls[0][1][0])
    self.assertEqual([
        'xcresulttool', 'get', '--format', 'json', '--path', 'xcresult_path',
        '--id', 'REF_ID'], mock_process.mock_calls[1][1][0])

  def testXcresulttoolListFailedTests(self):
    """_list_of_failed_tests extracts failure location and message."""
    failure_message = [
        'file:///../../ios/web/shell/test/page_state_egtest.mm#'
        'CharacterRangeLen=0&EndingLineNumber=130&StartingLineNumber=130'
    ] + 'Fail. Screenshots: {\n\"Failure\": \"path.png\"\n}'.splitlines()
    expected = {
        'PageStateTestCase/testZeroContentOffsetAfterLoad': failure_message
    }
    self.assertEqual(
        expected,
        xcode_log_parser.Xcode11LogParser()._list_of_failed_tests(
            json.loads(XCRESULT_ROOT)))

  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  def testXcresulttoolListPassedTests(self, mock_xcresult):
    """_get_test_statuses collects passed tests into results['passed']."""
    mock_xcresult.side_effect = _xcresulttool_get_side_effect
    expected = [
        'PageStateTestCase/testMethod1', 'PageStateTestCase/testMethod2'
    ]
    results = {'passed': [], 'failed': {}}
    xcode_log_parser.Xcode11LogParser()._get_test_statuses(OUTPUT_PATH, results)
    self.assertEqual(expected, results['passed'])

  @mock.patch('file_util.zip_and_remove_folder')
  @mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
  @mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
  @mock.patch('os.path.exists', autospec=True)
  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  @mock.patch('xcode_log_parser.Xcode11LogParser._list_of_failed_tests')
  def testCollectTestTesults(self, mock_get_failed_tests, mock_root,
                             mock_exist_file, *args):
    """collect_test_results merges passed and failed test data."""
    # NOTE(review): metrics_json below is assigned but never used in this
    # test — the root JSON comes from _xcresulttool_get_side_effect instead.
    metrics_json = """
    {
      "metrics": {
        "testsCount": {
          "_value": "7"
        },
        "testsFailedCount": {
          "_value": "14"
        }
      }
    }"""
    expected_test_results = {
        'passed': [
            'PageStateTestCase/testMethod1', 'PageStateTestCase/testMethod2'
        ],
        'failed': {
            'WebUITestCase/testBackForwardFromWebURL': [
                'file://<unknown>#CharacterRangeLen=0',
                'Test crashed in <external symbol>'
            ]
        }
    }
    mock_get_failed_tests.return_value = expected_test_results['failed']
    mock_root.side_effect = _xcresulttool_get_side_effect
    mock_exist_file.return_value = True
    self.assertEqual(
        expected_test_results,
        xcode_log_parser.Xcode11LogParser().collect_test_results(
            OUTPUT_PATH, []))

  @mock.patch('file_util.zip_and_remove_folder')
  @mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
  @mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
  @mock.patch('os.path.exists', autospec=True)
  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  def testCollectTestsRanZeroTests(self, mock_root, mock_exist_file, *args):
    """Empty metrics are reported as the TESTS_DID_NOT_START failure."""
    metrics_json = '{"metrics": {}}'
    expected_test_results = {
        'passed': [],
        'failed': {'TESTS_DID_NOT_START': ['0 tests executed!']}}
    mock_root.return_value = metrics_json
    mock_exist_file.return_value = True
    self.assertEqual(
        expected_test_results,
        xcode_log_parser.Xcode11LogParser().collect_test_results(
            OUTPUT_PATH, []))

  @mock.patch('os.path.exists', autospec=True)
  def testCollectTestsDidNotRun(self, mock_exist_file):
    """A missing staging dir is reported as TESTS_DID_NOT_START."""
    mock_exist_file.return_value = False
    expected_test_results = {
        'passed': [],
        'failed': {
            'TESTS_DID_NOT_START': [
                '%s with staging data does not exist.' % OUTPUT_PATH
            ]
        }
    }
    self.assertEqual(
        expected_test_results,
        xcode_log_parser.Xcode11LogParser().collect_test_results(
            OUTPUT_PATH, []))

  @mock.patch('os.path.exists', autospec=True)
  def testCollectTestsInterruptedRun(self, mock_exist_file):
    """A missing .xcresult bundle is reported as BUILD_INTERRUPTED."""
    # First exists() call (staging dir) succeeds, second (xcresult) fails.
    mock_exist_file.side_effect = [True, False]
    expected_test_results = {
        'passed': [],
        'failed': {
            'BUILD_INTERRUPTED': [
                '%s with test results does not exist.' % XCRESULT_PATH
            ]
        }
    }
    self.assertEqual(
        expected_test_results,
        xcode_log_parser.Xcode11LogParser().collect_test_results(
            OUTPUT_PATH, []))

  @mock.patch('subprocess.check_output', autospec=True)
  @mock.patch('os.path.exists', autospec=True)
  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  def testCopyScreenshots(self, mock_xcresulttool_get, mock_path_exists,
                          mock_process):
    """copy_artifacts exports failure screenshots and crash logs only."""
    mock_path_exists.return_value = True
    mock_xcresulttool_get.side_effect = _xcresulttool_get_side_effect
    xcode_log_parser.Xcode11LogParser().copy_artifacts(OUTPUT_PATH)
    mock_process.assert_any_call([
        'xcresulttool', 'export', '--type', 'file', '--id',
        'SCREENSHOT_REF_ID_IN_FAILURE_SUMMARIES', '--path', XCRESULT_PATH,
        '--output-path',
        '/tmp/attempt_0_PageStateTestCase_testZeroContentOffsetAfterLoad_2.jpeg'
    ])
    mock_process.assert_any_call([
        'xcresulttool', 'export', '--type', 'file', '--id',
        'CRASH_REF_ID_IN_ACTIVITY_SUMMARIES', '--path', XCRESULT_PATH,
        '--output-path',
        '/tmp/attempt_0_PageStateTestCase_testZeroContentOffsetAfterLoad_1'
        '.crash'
    ])
    # Ensures screenshots in activitySummaries are not copied.
    self.assertEqual(2, mock_process.call_count)

  @mock.patch('file_util.zip_and_remove_folder')
  @mock.patch('subprocess.check_output', autospec=True)
  @mock.patch('os.path.exists', autospec=True)
  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  def testExportDiagnosticData(self, mock_xcresulttool_get, mock_path_exists,
                               mock_process, _):
    """export_diagnostic_data exports the diagnostics ref as a directory."""
    mock_path_exists.return_value = True
    mock_xcresulttool_get.side_effect = _xcresulttool_get_side_effect
    xcode_log_parser.Xcode11LogParser.export_diagnostic_data(OUTPUT_PATH)
    mock_process.assert_called_with([
        'xcresulttool', 'export', '--type', 'directory', '--id',
        'DIAGNOSTICS_REF_ID', '--path', XCRESULT_PATH, '--output-path',
        '/tmp/attempt_0.xcresult_diagnostic'
    ])

  @mock.patch('os.path.exists', autospec=True)
  def testCollectTestResults_interruptedTests(self, mock_path_exists):
    """On an interrupted build, passed tests are parsed from stdout lines."""
    mock_path_exists.side_effect = [True, False]
    output = [
        '[09:03:42:INFO] Test case \'-[TestCase1 method1]\' passed on device.',
        '[09:06:40:INFO] Test Case \'-[TestCase2 method1]\' passed on device.',
        '[09:09:00:INFO] Test case \'-[TestCase2 method1]\' failed on device.',
        '** BUILD INTERRUPTED **',
    ]
    not_found_message = ['%s with test results does not exist.' % XCRESULT_PATH]
    res = xcode_log_parser.Xcode11LogParser().collect_test_results(
        OUTPUT_PATH, output)
    self.assertIn('BUILD_INTERRUPTED', res['failed'])
    self.assertEqual(not_found_message + output,
                     res['failed']['BUILD_INTERRUPTED'])
    self.assertEqual(['TestCase1/method1', 'TestCase2/method1'],
                     res['passed'])

  @mock.patch('file_util.zip_and_remove_folder')
  @mock.patch('xcode_log_parser.Xcode11LogParser.copy_artifacts')
  @mock.patch('xcode_log_parser.Xcode11LogParser.export_diagnostic_data')
  @mock.patch('os.path.exists', autospec=True)
  @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
  @mock.patch('xcode_log_parser.Xcode11LogParser._list_of_failed_tests')
  def testArtifactsDiagnosticLogsExportedInCollectTestTesults(
      self, mock_get_failed_tests, mock_root, mock_exist_file,
      mock_export_diagnostic_data, mock_copy_artifacts, mock_zip):
    """collect_test_results triggers artifact and diagnostic export."""
    mock_root.side_effect = _xcresulttool_get_side_effect
    mock_exist_file.return_value = True
    xcode_log_parser.Xcode11LogParser().collect_test_results(OUTPUT_PATH, [])
    mock_export_diagnostic_data.assert_called_with(OUTPUT_PATH)
    mock_copy_artifacts.assert_called_with(OUTPUT_PATH)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 33.951987 | 153 | 0.52704 |
acdfed7f766850d196543b098ef45438ed5b411f | 60,485 | py | Python | api/tacticalrmm/alerts/tests.py | lcsnetworks/tacticalrmm | c9135f157394f51dd6ca3d43b18fa3ea0afea65b | [
"MIT"
] | null | null | null | api/tacticalrmm/alerts/tests.py | lcsnetworks/tacticalrmm | c9135f157394f51dd6ca3d43b18fa3ea0afea65b | [
"MIT"
] | null | null | null | api/tacticalrmm/alerts/tests.py | lcsnetworks/tacticalrmm | c9135f157394f51dd6ca3d43b18fa3ea0afea65b | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from itertools import cycle
from unittest.mock import patch
from alerts.tasks import cache_agents_alert_template
from core.models import CoreSettings
from core.tasks import cache_db_fields_task
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from .models import Alert, AlertTemplate
from .serializers import (
AlertSerializer,
AlertTemplateRelationSerializer,
AlertTemplateSerializer,
)
base_url = "/alerts"
class TestAlertsViews(TacticalTestCase):
    """View tests for the alerts and alert-template API endpoints.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """

    def setUp(self):
        self.authenticate()
        self.setup_coresettings()

    def test_get_alerts(self):
        """Fetching alerts supports a 'top N' mode and various filters."""
        url = "/alerts/"

        # create check, task, and agent to test each serializer function
        check = baker.make_recipe("checks.diskspace_check")
        task = baker.make("autotasks.AutomatedTask")
        agent = baker.make_recipe("agents.agent")

        # setup data: 3 warning agent alerts, 7 error check alerts,
        # 2 snoozed task alerts, and 9 resolved agent alerts
        alerts = baker.make(
            "alerts.Alert",
            agent=agent,
            alert_time=seq(datetime.now(), timedelta(days=15)),
            severity="warning",
            _quantity=3,
        )
        baker.make(
            "alerts.Alert",
            assigned_check=check,
            alert_time=seq(datetime.now(), timedelta(days=15)),
            severity="error",
            _quantity=7,
        )
        baker.make(
            "alerts.Alert",
            assigned_task=task,
            snoozed=True,
            snooze_until=djangotime.now(),
            alert_time=seq(datetime.now(), timedelta(days=15)),
            _quantity=2,
        )
        baker.make(
            "alerts.Alert",
            agent=agent,
            resolved=True,
            resolved_on=djangotime.now(),
            alert_time=seq(datetime.now(), timedelta(days=15)),
            _quantity=9,
        )

        # test top alerts for alerts icon
        data = {"top": 3}
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data["alerts"], AlertSerializer(alerts, many=True).data)  # type: ignore
        self.assertEqual(resp.data["alerts_count"], 10)  # type: ignore

        # test filter data
        # test data and result counts
        data = [
            {
                "filter": {
                    "timeFilter": 30,
                    "snoozedFilter": True,
                    "resolvedFilter": False,
                },
                "count": 12,
            },
            {
                "filter": {
                    "timeFilter": 45,
                    "snoozedFilter": False,
                    "resolvedFilter": False,
                },
                "count": 10,
            },
            {
                "filter": {
                    "severityFilter": ["error"],
                    "snoozedFilter": False,
                    "resolvedFilter": True,
                    "timeFilter": 20,
                },
                "count": 7,
            },
            {
                "filter": {
                    "clientFilter": [],
                    "snoozedFilter": True,
                    "resolvedFilter": False,
                },
                "count": 0,
            },
            {"filter": {}, "count": 21},
            {"filter": {"snoozedFilter": True, "resolvedFilter": False}, "count": 12},
        ]

        for req in data:
            resp = self.client.patch(url, req["filter"], format="json")
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(len(resp.data), req["count"])  # type: ignore

        self.check_not_authenticated("patch", url)

    def test_add_alert(self):
        """POSTing a valid alert payload creates the alert."""
        url = "/alerts/"

        agent = baker.make_recipe("agents.agent")
        data = {
            "alert_time": datetime.now(),
            "agent": agent.id,
            "severity": "warning",
            "alert_type": "availability",
        }

        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

        self.check_not_authenticated("post", url)

    def test_get_alert(self):
        """GET on a single alert returns its serialized form; 404 if absent."""
        # returns 404 for invalid alert pk
        resp = self.client.get("/alerts/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert = baker.make("alerts.Alert")
        url = f"/alerts/{alert.pk}/"  # type: ignore

        resp = self.client.get(url, format="json")
        serializer = AlertSerializer(alert)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, serializer.data)  # type: ignore

        self.check_not_authenticated("get", url)

    def test_update_alert(self):
        """PUT supports resolve/snooze/unsnooze actions and validates input."""
        # returns 404 for invalid alert pk
        resp = self.client.put("/alerts/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert = baker.make("alerts.Alert", resolved=False, snoozed=False)
        url = f"/alerts/{alert.pk}/"  # type: ignore

        # test resolving alert
        data = {
            "type": "resolve",
        }
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(Alert.objects.get(pk=alert.pk).resolved)  # type: ignore
        self.assertTrue(Alert.objects.get(pk=alert.pk).resolved_on)  # type: ignore

        # test snoozing alert
        data = {"type": "snooze", "snooze_days": "30"}  # type: ignore
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(Alert.objects.get(pk=alert.pk).snoozed)  # type: ignore
        self.assertTrue(Alert.objects.get(pk=alert.pk).snooze_until)  # type: ignore

        # test snoozing alert without snooze_days
        data = {"type": "snooze"}  # type: ignore
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 400)

        # test unsnoozing alert
        data = {"type": "unsnooze"}  # type: ignore
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(Alert.objects.get(pk=alert.pk).snoozed)  # type: ignore
        self.assertFalse(Alert.objects.get(pk=alert.pk).snooze_until)  # type: ignore

        # test invalid type
        data = {"type": "invalid"}  # type: ignore
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 400)

        self.check_not_authenticated("put", url)

    def test_delete_alert(self):
        """DELETE removes the alert row."""
        # returns 404 for invalid alert pk
        resp = self.client.put("/alerts/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert = baker.make("alerts.Alert")

        # test delete alert
        url = f"/alerts/{alert.pk}/"  # type: ignore
        resp = self.client.delete(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(Alert.objects.filter(pk=alert.pk).exists())  # type: ignore

        self.check_not_authenticated("delete", url)

    def test_bulk_alert_actions(self):
        """Bulk endpoint validates actions and snoozes/resolves in bulk."""
        url = "/alerts/bulk/"

        # setup data
        alerts = baker.make("alerts.Alert", resolved=False, _quantity=3)

        # test invalid data
        data = {"bulk_action": "invalid"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)

        # test snooze without snooze days
        data = {"bulk_action": "snooze"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)

        # test bulk snoozing alerts
        data = {
            "bulk_action": "snooze",
            "alerts": [alert.pk for alert in alerts],  # type: ignore
            "snooze_days": "30",
        }
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(Alert.objects.filter(snoozed=False).exists())

        # test bulk resolving alerts
        data = {"bulk_action": "resolve", "alerts": [alert.pk for alert in alerts]}  # type: ignore
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(Alert.objects.filter(resolved=False).exists())
        self.assertTrue(Alert.objects.filter(snoozed=False).exists())

    def test_get_alert_templates(self):
        """Listing alert templates returns the serialized set."""
        url = "/alerts/templates/"

        alert_templates = baker.make("alerts.AlertTemplate", _quantity=3)
        resp = self.client.get(url, format="json")
        serializer = AlertTemplateSerializer(alert_templates, many=True)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, serializer.data)  # type: ignore

        self.check_not_authenticated("get", url)

    def test_add_alert_template(self):
        """POST creates an alert template from a minimal payload."""
        url = "/alerts/templates/"

        data = {
            "name": "Test Template",
        }

        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

        self.check_not_authenticated("post", url)

    def test_get_alert_template(self):
        """GET on a single template returns its serialized form; 404 if absent."""
        # returns 404 for invalid alert template pk
        resp = self.client.get("/alerts/templates/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert_template = baker.make("alerts.AlertTemplate")
        url = f"/alerts/templates/{alert_template.pk}/"  # type: ignore

        resp = self.client.get(url, format="json")
        serializer = AlertTemplateSerializer(alert_template)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, serializer.data)  # type: ignore

        self.check_not_authenticated("get", url)

    def test_update_alert_template(self):
        """PUT updates template fields; 404 for an unknown pk."""
        # returns 404 for invalid alert pk
        resp = self.client.put("/alerts/templates/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert_template = baker.make("alerts.AlertTemplate")
        url = f"/alerts/templates/{alert_template.pk}/"  # type: ignore

        # test data
        data = {
            "agent_email_on_resolved": True,
            "agent_text_on_resolved": True,
            "agent_include_desktops": True,
            "agent_always_email": True,
            "agent_always_text": True,
            "agent_always_alert": True,
            "agent_periodic_alert_days": "90",
        }

        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

        self.check_not_authenticated("put", url)

    def test_delete_alert_template(self):
        """DELETE removes the alert template row."""
        # returns 404 for invalid alert pk
        resp = self.client.put("/alerts/templates/500/", format="json")
        self.assertEqual(resp.status_code, 404)

        alert_template = baker.make("alerts.AlertTemplate")

        # test delete alert
        url = f"/alerts/templates/{alert_template.pk}/"  # type: ignore
        resp = self.client.delete(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(AlertTemplate.objects.filter(pk=alert_template.pk).exists())  # type: ignore

        self.check_not_authenticated("delete", url)

    def test_alert_template_related(self):
        """The related endpoint reports linked policies, clients and sites."""
        # setup data
        alert_template = baker.make("alerts.AlertTemplate")
        baker.make("clients.Client", alert_template=alert_template, _quantity=2)
        baker.make("clients.Site", alert_template=alert_template, _quantity=3)
        baker.make("automation.Policy", alert_template=alert_template)
        core = CoreSettings.objects.first()
        core.alert_template = alert_template  # type: ignore
        core.save()  # type: ignore

        url = f"/alerts/templates/{alert_template.pk}/related/"  # type: ignore

        resp = self.client.get(url, format="json")
        serializer = AlertTemplateRelationSerializer(alert_template)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data, serializer.data)  # type: ignore
        self.assertEqual(len(resp.data["policies"]), 1)  # type: ignore
        self.assertEqual(len(resp.data["clients"]), 2)  # type: ignore
        self.assertEqual(len(resp.data["sites"]), 3)  # type: ignore
        self.assertTrue(
            AlertTemplate.objects.get(pk=alert_template.pk).is_default_template  # type: ignore
        )
class TestAlertTasks(TacticalTestCase):
    def setUp(self):
        # Authenticate the API client and ensure CoreSettings exists for
        # every test in this case.
        self.authenticate()
        self.setup_coresettings()
    def test_unsnooze_alert_task(self):
        """unsnooze_alerts only clears alerts whose snooze window has passed."""
        from alerts.tasks import unsnooze_alerts

        # NOTE(review): original comments here were swapped relative to the
        # assertions below. These alerts have snooze_until in the FUTURE and,
        # per the assertions, remain snoozed after the task runs.
        not_snoozed = baker.make(
            "alerts.Alert",
            snoozed=True,
            snooze_until=seq(datetime.now(), timedelta(days=15)),
            _quantity=5,
        )

        # These have snooze_until in the PAST; per the assertions, some of
        # them end up unsnoozed after the task runs.
        snoozed = baker.make(
            "alerts.Alert",
            snoozed=True,
            snooze_until=seq(datetime.now(), timedelta(days=-15)),
            _quantity=5,
        )

        unsnooze_alerts()

        # None of the future-dated alerts were unsnoozed.
        self.assertFalse(
            Alert.objects.filter(
                pk__in=[alert.pk for alert in not_snoozed], snoozed=False  # type: ignore
            ).exists()
        )
        # At least one of the past-dated alerts was unsnoozed.
        self.assertTrue(
            Alert.objects.filter(
                pk__in=[alert.pk for alert in snoozed], snoozed=False  # type: ignore
            ).exists()
        )
def test_agent_gets_correct_alert_template(self):
core = CoreSettings.objects.first()
# setup data
workstation = baker.make_recipe("agents.agent", monitoring_type="workstation")
server = baker.make_recipe("agents.agent", monitoring_type="server")
policy = baker.make("automation.Policy", active=True)
alert_templates = baker.make("alerts.AlertTemplate", _quantity=6)
# should be None
self.assertFalse(workstation.set_alert_template())
self.assertFalse(server.set_alert_template())
# assign first Alert Template as to a policy and apply it as default
policy.alert_template = alert_templates[0] # type: ignore
policy.save() # type: ignore
core.workstation_policy = policy # type: ignore
core.server_policy = policy # type: ignore
core.save() # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[0].pk) # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[0].pk) # type: ignore
# assign second Alert Template to as default alert template
core.alert_template = alert_templates[1] # type: ignore
core.save() # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[1].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[1].pk) # type: ignore
# assign third Alert Template to client
workstation.client.alert_template = alert_templates[2] # type: ignore
server.client.alert_template = alert_templates[2] # type: ignore
workstation.client.save()
server.client.save()
self.assertEquals(workstation.set_alert_template().pk, alert_templates[2].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[2].pk) # type: ignore
# apply policy to client and should override
workstation.client.workstation_policy = policy
server.client.server_policy = policy
workstation.client.save()
server.client.save()
self.assertEquals(workstation.set_alert_template().pk, alert_templates[0].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[0].pk) # type: ignore
# assign fouth Alert Template to site
workstation.site.alert_template = alert_templates[3] # type: ignore
server.site.alert_template = alert_templates[3] # type: ignore
workstation.site.save()
server.site.save()
self.assertEquals(workstation.set_alert_template().pk, alert_templates[3].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[3].pk) # type: ignore
# apply policy to site
workstation.site.workstation_policy = policy
server.site.server_policy = policy
workstation.site.save()
server.site.save()
self.assertEquals(workstation.set_alert_template().pk, alert_templates[0].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[0].pk) # type: ignore
# apply policy to agents
workstation.policy = policy
server.policy = policy
workstation.save()
server.save()
self.assertEquals(workstation.set_alert_template().pk, alert_templates[0].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[0].pk) # type: ignore
# test disabling alert template
alert_templates[0].is_active = False # type: ignore
alert_templates[0].save() # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[3].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[3].pk) # type: ignore
# test policy exclusions
alert_templates[3].excluded_agents.set([workstation.pk]) # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[2].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[3].pk) # type: ignore
# test workstation exclusions
alert_templates[2].exclude_workstations = True # type: ignore
alert_templates[2].save() # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[1].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[3].pk) # type: ignore
# test server exclusions
alert_templates[3].exclude_servers = True # type: ignore
alert_templates[3].save() # type: ignore
self.assertEquals(workstation.set_alert_template().pk, alert_templates[1].pk) # type: ignore
self.assertEquals(server.set_alert_template().pk, alert_templates[2].pk) # type: ignore
@patch("agents.tasks.sleep")
@patch("core.models.CoreSettings.send_mail")
@patch("core.models.CoreSettings.send_sms")
@patch("agents.tasks.agent_outage_sms_task.delay")
@patch("agents.tasks.agent_outage_email_task.delay")
@patch("agents.tasks.agent_recovery_email_task.delay")
@patch("agents.tasks.agent_recovery_sms_task.delay")
def test_handle_agent_alerts(
self,
recovery_sms,
recovery_email,
outage_email,
outage_sms,
send_sms,
send_email,
sleep,
):
from agents.models import Agent
from agents.tasks import (
agent_outage_email_task,
agent_outage_sms_task,
agent_outages_task,
agent_recovery_email_task,
agent_recovery_sms_task,
)
from alerts.models import Alert
agent_dashboard_alert = baker.make_recipe("agents.overdue_agent")
# call outages task and no alert should be created
agent_outages_task()
self.assertEquals(Alert.objects.count(), 0)
# set overdue_dashboard_alert and alert should be created
agent_dashboard_alert.overdue_dashboard_alert = True
agent_dashboard_alert.save()
# create other agents with various alert settings
alert_template_always_alert = baker.make(
"alerts.AlertTemplate", is_active=True, agent_always_alert=True
)
alert_template_always_text = baker.make(
"alerts.AlertTemplate",
is_active=True,
agent_always_text=True,
agent_periodic_alert_days=5,
)
alert_template_always_email = baker.make(
"alerts.AlertTemplate",
is_active=True,
agent_always_email=True,
agent_periodic_alert_days=5,
)
alert_template_blank = baker.make("alerts.AlertTemplate", is_active=True)
agent_template_email = baker.make_recipe("agents.overdue_agent")
agent_template_dashboard = baker.make_recipe("agents.overdue_agent")
agent_template_text = baker.make_recipe("agents.overdue_agent")
agent_template_blank = baker.make_recipe("agents.overdue_agent")
# assign alert templates to agent's clients
agent_template_email.client.alert_template = alert_template_always_email
agent_template_email.client.save()
agent_template_dashboard.client.alert_template = alert_template_always_alert
agent_template_dashboard.client.save()
agent_template_text.client.alert_template = alert_template_always_text
agent_template_text.client.save()
agent_template_blank.client.alert_template = alert_template_blank
agent_template_blank.client.save()
agent_text_alert = baker.make_recipe(
"agents.overdue_agent", overdue_text_alert=True
)
agent_email_alert = baker.make_recipe(
"agents.overdue_agent", overdue_email_alert=True
)
cache_agents_alert_template()
agent_outages_task()
# should have created 6 alerts
self.assertEquals(Alert.objects.count(), 6)
# other specific agents should have created alerts
self.assertEquals(Alert.objects.filter(agent=agent_dashboard_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_text_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_email_alert).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_template_email).count(), 1)
self.assertEquals(
Alert.objects.filter(agent=agent_template_dashboard).count(), 1
)
self.assertEquals(Alert.objects.filter(agent=agent_template_text).count(), 1)
self.assertEquals(Alert.objects.filter(agent=agent_template_blank).count(), 0)
# check if email and text tasks were called
self.assertEquals(outage_email.call_count, 2)
self.assertEquals(outage_sms.call_count, 2)
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_text_alert).pk, alert_interval=None
)
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_email_alert).pk, alert_interval=None
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
# call the email/sms outage tasks synchronously
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_text_alert).pk, alert_interval=None
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_email_alert).pk, alert_interval=None
)
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
# check if email/text sent was set
self.assertTrue(Alert.objects.get(agent=agent_text_alert).sms_sent)
self.assertFalse(Alert.objects.get(agent=agent_text_alert).email_sent)
self.assertTrue(Alert.objects.get(agent=agent_email_alert).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_email_alert).sms_sent)
self.assertTrue(Alert.objects.get(agent=agent_template_text).sms_sent)
self.assertTrue(Alert.objects.get(agent=agent_template_email).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_dashboard_alert).email_sent)
self.assertFalse(Alert.objects.get(agent=agent_dashboard_alert).sms_sent)
# calling agent outage task again shouldn't create duplicate alerts and won't send alerts
agent_outages_task()
self.assertEquals(Alert.objects.count(), 6)
# test periodic notification
# change email/text sent to sometime in the past
alert_text = Alert.objects.get(agent=agent_template_text)
alert_text.sms_sent = djangotime.now() - djangotime.timedelta(days=20)
alert_text.save()
alert_email = Alert.objects.get(agent=agent_template_email)
alert_email.email_sent = djangotime.now() - djangotime.timedelta(days=20)
alert_email.save()
send_sms.reset_mock()
send_email.reset_mock()
agent_outages_task()
outage_sms.assert_any_call(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
outage_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
agent_outage_sms_task(
pk=Alert.objects.get(agent=agent_template_text).pk, alert_interval=5
)
agent_outage_email_task(
pk=Alert.objects.get(agent=agent_template_email).pk, alert_interval=5
)
self.assertEqual(send_sms.call_count, 1)
self.assertEqual(send_email.call_count, 1)
# test resolved alerts
# alter the alert template to email and test on resolved
alert_template_always_email.agent_email_on_resolved = True # type: ignore
alert_template_always_email.save() # type: ignore
alert_template_always_text.agent_text_on_resolved = True # type: ignore
alert_template_always_text.save() # type: ignore
agent_template_text = Agent.objects.get(pk=agent_template_text.pk)
agent_template_email = Agent.objects.get(pk=agent_template_email.pk)
# have the two agents checkin
url = "/api/v3/checkin/"
agent_template_text.version = settings.LATEST_AGENT_VER
agent_template_text.last_seen = djangotime.now()
agent_template_text.save()
agent_template_email.version = settings.LATEST_AGENT_VER
agent_template_email.last_seen = djangotime.now()
agent_template_email.save()
cache_db_fields_task()
recovery_sms.assert_called_with(
pk=Alert.objects.get(agent=agent_template_text).pk
)
recovery_email.assert_any_call(
pk=Alert.objects.get(agent=agent_template_email).pk
)
agent_recovery_sms_task(pk=Alert.objects.get(agent=agent_template_text).pk)
agent_recovery_email_task(pk=Alert.objects.get(agent=agent_template_email).pk)
self.assertTrue(Alert.objects.get(agent=agent_template_text).resolved_sms_sent)
self.assertTrue(
Alert.objects.get(agent=agent_template_email).resolved_email_sent
)
@patch("checks.tasks.sleep")
@patch("core.models.CoreSettings.send_mail")
@patch("core.models.CoreSettings.send_sms")
@patch("checks.tasks.handle_check_sms_alert_task.delay")
@patch("checks.tasks.handle_check_email_alert_task.delay")
@patch("checks.tasks.handle_resolved_check_email_alert_task.delay")
@patch("checks.tasks.handle_resolved_check_sms_alert_task.delay")
def test_handle_check_alerts(
    self,
    resolved_sms,
    resolved_email,
    outage_email,
    outage_sms,
    send_sms,
    send_email,
    sleep,
):
    """End-to-end coverage of check failure/resolve alerting.

    Exercises Alert.handle_alert_failure / handle_alert_resolve for checks
    across four agent configurations: per-check alert settings, an
    always-email alert template, an always-dashboard/text template, a blank
    template, and no settings at all. Also covers periodic (re-)notification
    and resolved notifications.

    Fixes applied in review:
    - ``outage_sms.assert_not_called`` was missing parentheses, making the
      assertion a no-op; it is now actually invoked.
    - ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    """
    from alerts.tasks import cache_agents_alert_template
    from checks.models import Check
    from checks.tasks import (
        handle_check_email_alert_task,
        handle_check_sms_alert_task,
        handle_resolved_check_email_alert_task,
        handle_resolved_check_sms_alert_task,
    )

    # create test data
    agent = baker.make_recipe("agents.agent")
    agent_no_settings = baker.make_recipe("agents.agent")
    agent_template_email = baker.make_recipe("agents.agent")
    agent_template_dashboard_text = baker.make_recipe("agents.agent")
    agent_template_blank = baker.make_recipe("agents.agent")

    # add disks to diskspace check agent
    agent.disks = [
        {
            "free": "64.7G",
            "used": "167.6G",
            "total": "232.3G",
            "device": "C:",
            "fstype": "NTFS",
            "percent": 72,
        }
    ]
    agent.save()

    # create agent with template to always email on warning severity
    alert_template_email = baker.make(
        "alerts.AlertTemplate",
        is_active=True,
        check_always_email=True,
        check_email_alert_severity=["warning"],
    )
    agent_template_email.client.alert_template = alert_template_email
    agent_template_email.client.save()

    # create agent with template to always dashboard and text on various alert severities
    alert_template_dashboard_text = baker.make(
        "alerts.AlertTemplate",
        is_active=True,
        check_always_alert=True,
        check_always_text=True,
        check_dashboard_alert_severity=["info", "warning", "error"],
        check_text_alert_severity=["error"],
    )
    agent_template_dashboard_text.client.alert_template = (
        alert_template_dashboard_text
    )
    agent_template_dashboard_text.client.save()

    # create agent with blank template
    alert_template_blank = baker.make("alerts.AlertTemplate", is_active=True)
    agent_template_blank.client.alert_template = alert_template_blank
    agent_template_blank.client.save()

    # create some checks per agent above
    check_agent = baker.make_recipe(
        "checks.diskspace_check",
        agent=agent,
        email_alert=True,
        text_alert=True,
        dashboard_alert=True,
        alert_severity="warning",
    )
    check_template_email = baker.make_recipe(
        "checks.cpuload_check", agent=agent_template_email, history=[50, 40, 30]
    )
    check_template_dashboard_text = baker.make_recipe(
        "checks.memory_check",
        agent=agent_template_dashboard_text,
        history=[50, 40, 30],
    )
    check_template_blank = baker.make_recipe(
        "checks.ping_check", agent=agent_template_blank
    )
    check_no_settings = baker.make_recipe(
        "checks.script_check", agent=agent_no_settings
    )

    # update alert template and pull new checks from DB
    cache_agents_alert_template()
    check_template_email = Check.objects.get(pk=check_template_email.pk)
    check_template_dashboard_text = Check.objects.get(
        pk=check_template_dashboard_text.pk
    )
    check_template_blank = Check.objects.get(pk=check_template_blank.pk)

    # test agent with check that has alert settings
    check_agent.alert_severity = "warning"
    check_agent.status = "failing"

    Alert.handle_alert_failure(check_agent)

    # alert should have been created and sms, email notifications sent
    self.assertTrue(Alert.objects.filter(assigned_check=check_agent).exists())
    alertpk = Alert.objects.get(assigned_check=check_agent).pk
    outage_sms.assert_called_with(pk=alertpk, alert_interval=None)
    outage_email.assert_called_with(pk=alertpk, alert_interval=None)
    outage_sms.reset_mock()
    outage_email.reset_mock()

    # call outage email/sms tasks synchronously
    handle_check_sms_alert_task(pk=alertpk, alert_interval=None)
    handle_check_email_alert_task(pk=alertpk, alert_interval=None)

    alert = Alert.objects.get(assigned_check=check_agent)

    # make sure the email/text sent fields were set
    self.assertTrue(alert.email_sent)
    self.assertTrue(alert.sms_sent)
    # make sure the dashboard alert will be visible since dashboard_alert is enabled
    self.assertFalse(alert.hidden)

    send_email.assert_called()
    send_sms.assert_called()
    send_email.reset_mock()
    send_sms.reset_mock()

    # test check with an agent that has an email always alert template
    Alert.handle_alert_failure(check_template_email)
    self.assertTrue(Alert.objects.filter(assigned_check=check_template_email))
    alertpk = Alert.objects.get(assigned_check=check_template_email).pk
    outage_sms.assert_not_called()
    outage_email.assert_called_with(pk=alertpk, alert_interval=0)
    outage_email.reset_mock()

    # call outage email task synchronously
    handle_check_email_alert_task(pk=alertpk, alert_interval=0)

    # make sure dashboard alert is hidden
    self.assertTrue(Alert.objects.get(assigned_check=check_template_email).hidden)

    send_email.assert_called()
    send_email.reset_mock()

    # test check with an agent that has a dashboard/text always alert template
    Alert.handle_alert_failure(check_template_dashboard_text)
    self.assertTrue(
        Alert.objects.filter(assigned_check=check_template_dashboard_text).exists()
    )
    alertpk = Alert.objects.get(assigned_check=check_template_dashboard_text).pk
    # should only trigger when alert with severity of error
    # FIX: original called ``outage_sms.assert_not_called`` without parentheses,
    # which never executed the assertion.
    outage_sms.assert_not_called()

    # update check alert severity to error
    check_template_dashboard_text.alert_severity = "error"
    check_template_dashboard_text.save()

    # now should trigger alert
    Alert.handle_alert_failure(check_template_dashboard_text)
    outage_sms.assert_called_with(pk=alertpk, alert_interval=0)
    outage_sms.reset_mock()

    # call outage sms task synchronously
    handle_check_sms_alert_task(pk=alertpk, alert_interval=0)

    # make sure dashboard alert is not hidden
    self.assertFalse(
        Alert.objects.get(assigned_check=check_template_dashboard_text).hidden
    )

    send_sms.assert_called()
    send_sms.reset_mock()

    # test check with an agent that has a blank alert template
    Alert.handle_alert_failure(check_template_blank)
    self.assertFalse(
        Alert.objects.filter(assigned_check=check_template_blank).exists()
    )

    # test check that has no template and no settings
    Alert.handle_alert_failure(check_no_settings)
    self.assertFalse(
        Alert.objects.filter(assigned_check=check_no_settings).exists()
    )

    # test periodic notifications
    # make sure a failing check won't trigger another notification and only create a single alert
    Alert.handle_alert_failure(check_template_email)
    send_email.assert_not_called()
    send_sms.assert_not_called()
    # FIX: assertEquals is a deprecated alias of assertEqual
    self.assertEqual(
        Alert.objects.filter(assigned_check=check_template_email).count(), 1
    )

    alert_template_email.check_periodic_alert_days = 1  # type: ignore
    alert_template_email.save()  # type: ignore
    alert_template_dashboard_text.check_periodic_alert_days = 1  # type: ignore
    alert_template_dashboard_text.save()  # type: ignore

    # set last email time for alert in the past
    alert_email = Alert.objects.get(assigned_check=check_template_email)
    alert_email.email_sent = djangotime.now() - djangotime.timedelta(days=20)
    alert_email.save()

    # set last sms time for alert in the past
    alert_sms = Alert.objects.get(assigned_check=check_template_dashboard_text)
    alert_sms.sms_sent = djangotime.now() - djangotime.timedelta(days=20)
    alert_sms.save()

    # refresh checks to get alert template changes
    check_template_email = Check.objects.get(pk=check_template_email.pk)
    check_template_dashboard_text = Check.objects.get(
        pk=check_template_dashboard_text.pk
    )
    check_template_blank = Check.objects.get(pk=check_template_blank.pk)

    Alert.handle_alert_failure(check_template_email)
    Alert.handle_alert_failure(check_template_dashboard_text)

    outage_email.assert_called_with(pk=alert_email.pk, alert_interval=1)
    outage_sms.assert_called_with(pk=alert_sms.pk, alert_interval=1)
    outage_email.reset_mock()
    outage_sms.reset_mock()

    # test resolving alerts
    Alert.handle_alert_resolve(check_agent)
    self.assertTrue(Alert.objects.get(assigned_check=check_agent).resolved)
    self.assertTrue(Alert.objects.get(assigned_check=check_agent).resolved_on)
    resolved_sms.assert_not_called()
    resolved_email.assert_not_called()

    # test resolved notifications
    alert_template_email.check_email_on_resolved = True  # type: ignore
    alert_template_email.save()  # type: ignore
    alert_template_dashboard_text.check_text_on_resolved = True  # type: ignore
    alert_template_dashboard_text.save()  # type: ignore

    # refresh checks to get alert template changes
    check_template_email = Check.objects.get(pk=check_template_email.pk)
    check_template_dashboard_text = Check.objects.get(
        pk=check_template_dashboard_text.pk
    )
    check_template_blank = Check.objects.get(pk=check_template_blank.pk)

    Alert.handle_alert_resolve(check_template_email)
    resolved_email.assert_called_with(pk=alert_email.pk)
    resolved_sms.assert_not_called()
    resolved_email.reset_mock()

    Alert.handle_alert_resolve(check_template_dashboard_text)
    resolved_sms.assert_called_with(pk=alert_sms.pk)
    resolved_email.assert_not_called()

    # call the email and sms tasks synchronously
    handle_resolved_check_sms_alert_task(pk=alert_sms.pk)
    handle_resolved_check_email_alert_task(pk=alert_email.pk)

    send_email.assert_called()
    send_sms.assert_called()
@patch("autotasks.tasks.sleep")
@patch("core.models.CoreSettings.send_mail")
@patch("core.models.CoreSettings.send_sms")
@patch("autotasks.tasks.handle_task_sms_alert.delay")
@patch("autotasks.tasks.handle_task_email_alert.delay")
@patch("autotasks.tasks.handle_resolved_task_email_alert.delay")
@patch("autotasks.tasks.handle_resolved_task_sms_alert.delay")
def test_handle_task_alerts(
    self,
    resolved_sms,
    resolved_email,
    outage_email,
    outage_sms,
    send_sms,
    send_email,
    sleep,
):
    """End-to-end coverage of automated-task failure/resolve alerting.

    Mirrors test_handle_check_alerts but for AutomatedTask objects: per-task
    alert settings, always-email template, always-dashboard/text template,
    blank template, no settings, periodic re-notification, and resolved
    notifications.

    Fixes applied in review:
    - ``outage_sms.assert_not_called`` was missing parentheses, making the
      assertion a no-op; it is now actually invoked.
    - ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    """
    from alerts.tasks import cache_agents_alert_template
    from autotasks.models import AutomatedTask
    from autotasks.tasks import (
        handle_resolved_task_email_alert,
        handle_resolved_task_sms_alert,
        handle_task_email_alert,
        handle_task_sms_alert,
    )

    # create test data
    agent = baker.make_recipe("agents.agent")
    agent_no_settings = baker.make_recipe("agents.agent")
    agent_template_email = baker.make_recipe("agents.agent")
    agent_template_dashboard_text = baker.make_recipe("agents.agent")
    agent_template_blank = baker.make_recipe("agents.agent")

    # create agent with template to always email on warning severity
    alert_template_email = baker.make(
        "alerts.AlertTemplate",
        is_active=True,
        task_always_email=True,
        task_email_alert_severity=["warning"],
    )
    agent_template_email.client.alert_template = alert_template_email
    agent_template_email.client.save()

    # create agent with template to always dashboard and text on various alert severities
    alert_template_dashboard_text = baker.make(
        "alerts.AlertTemplate",
        is_active=True,
        task_always_alert=True,
        task_always_text=True,
        task_dashboard_alert_severity=["info", "warning", "error"],
        task_text_alert_severity=["error"],
    )
    agent_template_dashboard_text.client.alert_template = (
        alert_template_dashboard_text
    )
    agent_template_dashboard_text.client.save()

    # create agent with blank template
    alert_template_blank = baker.make("alerts.AlertTemplate", is_active=True)
    agent_template_blank.client.alert_template = alert_template_blank
    agent_template_blank.client.save()

    # create some tasks per agent above
    task_agent = baker.make(
        "autotasks.AutomatedTask",
        agent=agent,
        email_alert=True,
        text_alert=True,
        dashboard_alert=True,
        alert_severity="warning",
    )
    task_template_email = baker.make(
        "autotasks.AutomatedTask",
        agent=agent_template_email,
        alert_severity="warning",
    )
    task_template_dashboard_text = baker.make(
        "autotasks.AutomatedTask",
        agent=agent_template_dashboard_text,
        alert_severity="info",
    )
    task_template_blank = baker.make(
        "autotasks.AutomatedTask",
        agent=agent_template_blank,
        alert_severity="error",
    )
    task_no_settings = baker.make(
        "autotasks.AutomatedTask", agent=agent_no_settings, alert_severity="warning"
    )

    # update alert template and pull new tasks from DB
    cache_agents_alert_template()
    task_template_email = AutomatedTask.objects.get(pk=task_template_email.pk)  # type: ignore
    task_template_dashboard_text = AutomatedTask.objects.get(pk=task_template_dashboard_text.pk)  # type: ignore
    task_template_blank = AutomatedTask.objects.get(pk=task_template_blank.pk)  # type: ignore

    # test agent with task that has alert settings
    Alert.handle_alert_failure(task_agent)  # type: ignore

    # alert should have been created and sms, email notifications sent
    self.assertTrue(Alert.objects.filter(assigned_task=task_agent).exists())
    alertpk = Alert.objects.get(assigned_task=task_agent).pk
    outage_sms.assert_called_with(pk=alertpk, alert_interval=None)
    outage_email.assert_called_with(pk=alertpk, alert_interval=None)
    outage_sms.reset_mock()
    outage_email.reset_mock()

    # call outage email/sms tasks synchronously
    handle_task_sms_alert(pk=alertpk, alert_interval=None)
    handle_task_email_alert(pk=alertpk, alert_interval=None)

    alert = Alert.objects.get(assigned_task=task_agent)

    # make sure the email/text sent fields were set
    self.assertTrue(alert.email_sent)
    self.assertTrue(alert.sms_sent)
    # make sure the dashboard alert will be visible since dashboard_alert is enabled
    self.assertFalse(alert.hidden)

    send_email.assert_called()
    send_sms.assert_called()
    send_email.reset_mock()
    send_sms.reset_mock()

    # test task with an agent that has an email always alert template
    Alert.handle_alert_failure(task_template_email)  # type: ignore
    self.assertTrue(Alert.objects.filter(assigned_task=task_template_email))
    alertpk = Alert.objects.get(assigned_task=task_template_email).pk
    outage_sms.assert_not_called()
    outage_email.assert_called_with(pk=alertpk, alert_interval=0)
    outage_email.reset_mock()

    # call outage email task synchronously
    handle_task_email_alert(pk=alertpk, alert_interval=0)

    # make sure dashboard alert is hidden
    self.assertTrue(Alert.objects.get(assigned_task=task_template_email).hidden)

    send_email.assert_called()
    send_email.reset_mock()

    # test task with an agent that has a dashboard/text always alert template
    Alert.handle_alert_failure(task_template_dashboard_text)  # type: ignore
    self.assertTrue(
        Alert.objects.filter(assigned_task=task_template_dashboard_text).exists()
    )
    alertpk = Alert.objects.get(assigned_task=task_template_dashboard_text).pk
    # should only trigger when alert with severity of error
    # FIX: original called ``outage_sms.assert_not_called`` without parentheses,
    # which never executed the assertion.
    outage_sms.assert_not_called()

    # update task alert severity to error
    task_template_dashboard_text.alert_severity = "error"  # type: ignore
    task_template_dashboard_text.save()  # type: ignore

    # now should trigger alert
    Alert.handle_alert_failure(task_template_dashboard_text)  # type: ignore
    outage_sms.assert_called_with(pk=alertpk, alert_interval=0)
    outage_sms.reset_mock()

    # call outage sms task synchronously
    handle_task_sms_alert(pk=alertpk, alert_interval=0)

    # make sure dashboard alert is not hidden
    self.assertFalse(
        Alert.objects.get(assigned_task=task_template_dashboard_text).hidden
    )

    send_sms.assert_called()
    send_sms.reset_mock()

    # test task with an agent that has a blank alert template
    Alert.handle_alert_failure(task_template_blank)  # type: ignore
    self.assertFalse(
        Alert.objects.filter(assigned_task=task_template_blank).exists()
    )

    # test task that has no template and no settings
    Alert.handle_alert_failure(task_no_settings)  # type: ignore
    self.assertFalse(Alert.objects.filter(assigned_task=task_no_settings).exists())

    # test periodic notifications
    # make sure a failing task won't trigger another notification and only create a single alert
    Alert.handle_alert_failure(task_template_email)  # type: ignore
    send_email.assert_not_called()
    send_sms.assert_not_called()
    # FIX: assertEquals is a deprecated alias of assertEqual
    self.assertEqual(
        Alert.objects.filter(assigned_task=task_template_email).count(), 1
    )

    alert_template_email.task_periodic_alert_days = 1  # type: ignore
    alert_template_email.save()  # type: ignore
    alert_template_dashboard_text.task_periodic_alert_days = 1  # type: ignore
    alert_template_dashboard_text.save()  # type: ignore

    # set last email time for alert in the past
    alert_email = Alert.objects.get(assigned_task=task_template_email)
    alert_email.email_sent = djangotime.now() - djangotime.timedelta(days=20)
    alert_email.save()

    # set last sms time for alert in the past
    alert_sms = Alert.objects.get(assigned_task=task_template_dashboard_text)
    alert_sms.sms_sent = djangotime.now() - djangotime.timedelta(days=20)
    alert_sms.save()

    # refresh automated tasks to get new alert templates
    task_template_email = AutomatedTask.objects.get(pk=task_template_email.pk)  # type: ignore
    task_template_dashboard_text = AutomatedTask.objects.get(pk=task_template_dashboard_text.pk)  # type: ignore
    task_template_blank = AutomatedTask.objects.get(pk=task_template_blank.pk)  # type: ignore

    Alert.handle_alert_failure(task_template_email)  # type: ignore
    Alert.handle_alert_failure(task_template_dashboard_text)  # type: ignore

    outage_email.assert_called_with(pk=alert_email.pk, alert_interval=1)
    outage_sms.assert_called_with(pk=alert_sms.pk, alert_interval=1)
    outage_email.reset_mock()
    outage_sms.reset_mock()

    # test resolving alerts
    Alert.handle_alert_resolve(task_agent)  # type: ignore
    self.assertTrue(Alert.objects.get(assigned_task=task_agent).resolved)
    self.assertTrue(Alert.objects.get(assigned_task=task_agent).resolved_on)
    resolved_sms.assert_not_called()
    resolved_email.assert_not_called()

    # test resolved notifications
    alert_template_email.task_email_on_resolved = True  # type: ignore
    alert_template_email.save()  # type: ignore
    alert_template_dashboard_text.task_text_on_resolved = True  # type: ignore
    alert_template_dashboard_text.save()  # type: ignore

    # refresh automated tasks to get new alert templates
    task_template_email = AutomatedTask.objects.get(pk=task_template_email.pk)  # type: ignore
    task_template_dashboard_text = AutomatedTask.objects.get(pk=task_template_dashboard_text.pk)  # type: ignore
    task_template_blank = AutomatedTask.objects.get(pk=task_template_blank.pk)  # type: ignore

    Alert.handle_alert_resolve(task_template_email)  # type: ignore
    resolved_email.assert_called_with(pk=alert_email.pk)
    resolved_sms.assert_not_called()
    resolved_email.reset_mock()

    Alert.handle_alert_resolve(task_template_dashboard_text)  # type: ignore
    resolved_sms.assert_called_with(pk=alert_sms.pk)
    resolved_email.assert_not_called()

    # call the email and sms tasks synchronously
    handle_resolved_task_sms_alert(pk=alert_sms.pk)
    handle_resolved_task_email_alert(pk=alert_email.pk)

    send_email.assert_called()
    send_sms.assert_called()
@patch("core.models.TwClient")
@patch("smtplib.SMTP")
def test_override_core_settings(self, smtp, sms):
    """send_mail/send_sms should honor recipients/from overrides supplied
    by an alert template, with SMTP and Twilio clients mocked out."""
    from core.models import CoreSettings

    # template supplying the override recipients and from-address
    alert_template = baker.make(
        "alerts.AlertTemplate",
        email_recipients=["example@example.com"],
        text_recipients=["+12321233212"],
        email_from="from@email.com",
    )

    # populate core settings with baseline (non-template) values
    core = CoreSettings.objects.first()
    baseline = {
        "smtp_host": "test.test.com",
        "smtp_port": 587,
        "smtp_recipients": ["recipient@test.com"],
        "twilio_account_sid": "test",
        "twilio_auth_token": "1234123412341234",
        "sms_alert_recipients": ["+1234567890"],
    }
    for attr, value in baseline.items():
        setattr(core, attr, value)  # type: ignore

    # sending with the template should use the template overrides
    core.send_mail("Test", "Test", alert_template=alert_template)  # type: ignore
    core.send_sms("Test", alert_template=alert_template)  # type: ignore
@patch("agents.models.Agent.nats_cmd")
@patch("agents.tasks.agent_outage_sms_task.delay")
@patch("agents.tasks.agent_outage_email_task.delay")
@patch("agents.tasks.agent_recovery_email_task.delay")
@patch("agents.tasks.agent_recovery_sms_task.delay")
def test_alert_actions(
    self, recovery_sms, recovery_email, outage_email, outage_sms, nats_cmd
):
    """Verify failure/resolved script actions configured on an alert template.

    Scenario: an overdue agent trips an availability alert. The template's
    failure ``action`` script must run over NATS (only when
    ``agent_script_actions`` is enabled) and its results must be stored on the
    Alert; once the agent checks in again, the ``resolved_action`` must run
    with its own timeout/args and results stored likewise.

    NOTE(review): nats_cmd.side_effect ordering matters — each
    agent_outages_task / cache_db_fields_task pass consumes a "pong"
    (ping) followed by the script-run result dict.
    """
    from agents.tasks import agent_outages_task

    # Setup cmd mock: first call answers the ping, second returns script output
    success = {
        "retcode": 0,
        "stdout": "success!",
        "stderr": "",
        "execution_time": 5.0000,
    }
    nats_cmd.side_effect = ["pong", success]

    # setup data: an overdue agent plus failure/resolved action scripts
    agent = baker.make_recipe(
        "agents.overdue_agent", version=settings.LATEST_AGENT_VER
    )
    failure_action = baker.make_recipe("scripts.script")
    resolved_action = baker.make_recipe("scripts.script")
    alert_template = baker.make(
        "alerts.AlertTemplate",
        is_active=True,
        agent_always_alert=True,
        agent_script_actions=False,  # start disabled to prove the gate works
        action=failure_action,
        action_timeout=30,
        resolved_action=resolved_action,
        resolved_action_timeout=35,
        resolved_action_args=["nice_arg"],
    )
    agent.client.alert_template = alert_template
    agent.client.save()
    agent.set_alert_template()

    agent_outages_task()

    # should not have been called since agent_script_actions is set to False
    nats_cmd.assert_not_called()

    alert_template.agent_script_actions = True  # type: ignore
    alert_template.save()  # type: ignore

    agent_outages_task()

    # this is what data should be: the failure action run over NATS
    data = {
        "func": "runscriptfull",
        "timeout": 30,
        "script_args": [],
        "payload": {"code": failure_action.code, "shell": failure_action.shell},
    }

    nats_cmd.assert_called_with(data, timeout=30, wait=True)
    nats_cmd.reset_mock()
    # re-arm the side_effect for the resolve pass below
    nats_cmd.side_effect = ["pong", success]

    # make sure script run results were stored
    alert = Alert.objects.get(agent=agent)
    self.assertEqual(alert.action_retcode, 0)
    self.assertEqual(alert.action_execution_time, "5.0000")
    self.assertEqual(alert.action_stdout, "success!")
    self.assertEqual(alert.action_stderr, "")

    # resolve alert and test: agent checks in, cache task resolves the alert
    agent.last_seen = djangotime.now()
    agent.save()

    cache_db_fields_task()

    # this is what data should be: the resolved action with its own args/timeout
    data = {
        "func": "runscriptfull",
        "timeout": 35,
        "script_args": ["nice_arg"],
        "payload": {"code": resolved_action.code, "shell": resolved_action.shell},
    }

    nats_cmd.assert_called_with(data, timeout=35, wait=True)

    # make sure script run results were stored
    alert = Alert.objects.get(agent=agent)
    self.assertEqual(alert.resolved_action_retcode, 0)
    self.assertEqual(alert.resolved_action_execution_time, "5.0000")
    self.assertEqual(alert.resolved_action_stdout, "success!")
    self.assertEqual(alert.resolved_action_stderr, "")
def test_parse_script_args(self):
    """parse_script_args should replace {{alert.id}} with the quoted alert pk."""
    alert = baker.make("alerts.Alert")
    raw_args = ["-Parameter", "-Another {{alert.id}}"]
    expected = ["-Parameter", f"-Another '{alert.id}'"]  # type: ignore
    # default substitution behavior
    self.assertEqual(expected, alert.parse_script_args(args=raw_args))  # type: ignore
def test_prune_resolved_alerts(self):
    """prune_resolved_alerts(30) must delete only resolved alerts older than
    30 days; unresolved alerts are kept regardless of age."""
    from .tasks import prune_resolved_alerts

    # 25 resolved and 25 unresolved alerts
    resolved_alerts = baker.make(
        "alerts.Alert",
        resolved=True,
        _quantity=25,
    )
    alerts = baker.make(
        "alerts.Alert",
        resolved=False,
        _quantity=25,
    )

    # stagger alert_time 0, 5, 10, ... days into the past for each group
    for idx, alert in enumerate(resolved_alerts):  # type: ignore
        alert.alert_time = djangotime.now() - djangotime.timedelta(days=idx * 5)
        alert.save()

    for idx, alert in enumerate(alerts):  # type: ignore
        alert.alert_time = djangotime.now() - djangotime.timedelta(days=idx * 5)
        alert.save()

    # delete resolved alerts older than 30 days:
    # 7 resolved alerts survive (0..30 days) + all 25 unresolved = 31... wait,
    # kept = 25 unresolved + 6 resolved within 30 days = 31 total
    prune_resolved_alerts(30)

    self.assertEqual(Alert.objects.count(), 31)
class TestAlertPermissions(TacticalTestCase):
    """RBAC tests for the alert list endpoint and alert detail endpoints.

    Alerts are visible to a user when the user can see the agent the alert
    (or its assigned check/task) belongs to; custom alerts with no agent are
    visible to everyone with the alerts role.
    """

    def setUp(self):
        self.setup_coresettings()
        self.client_setup()

    def test_get_alerts_permissions(self):
        """List endpoint: superuser sees all 13 alerts; a plain user needs
        can_list_alerts; client/site restrictions filter agent-bound alerts
        but never the 4 agent-less custom alerts."""
        agent = baker.make_recipe("agents.agent")
        agent1 = baker.make_recipe("agents.agent")
        agent2 = baker.make_recipe("agents.agent")
        agents = [agent, agent1, agent2]
        checks = baker.make("checks.Check", agent=cycle(agents), _quantity=3)
        tasks = baker.make("autotasks.AutomatedTask", agent=cycle(agents), _quantity=3)
        # 3 task alerts + 3 check alerts + 3 availability alerts (one per agent)
        # + 4 custom alerts with no agent = 13 total
        baker.make(
            "alerts.Alert", alert_type="task", assigned_task=cycle(tasks), _quantity=3
        )
        baker.make(
            "alerts.Alert",
            alert_type="check",
            assigned_check=cycle(checks),
            _quantity=3,
        )
        baker.make(
            "alerts.Alert", alert_type="availability", agent=cycle(agents), _quantity=3
        )
        baker.make("alerts.Alert", alert_type="custom", _quantity=4)

        # test super user access
        r = self.check_authorized_superuser("patch", f"{base_url}/")
        self.assertEqual(len(r.data), 13)  # type: ignore

        user = self.create_user_with_roles([])
        self.client.force_authenticate(user=user)  # type: ignore

        self.check_not_authorized("patch", f"{base_url}/")

        # add list software role to user
        user.role.can_list_alerts = True
        user.role.save()

        r = self.check_authorized("patch", f"{base_url}/")
        self.assertEqual(len(r.data), 13)  # type: ignore

        # test limiting to client: 3 agent-bound alerts for agent + 4 custom = 7
        user.role.can_view_clients.set([agent.client])
        r = self.check_authorized("patch", f"{base_url}/")
        self.assertEqual(len(r.data), 7)  # type: ignore

        # test limiting to site: 3 agent-bound alerts for agent1 + 4 custom = 7
        user.role.can_view_clients.clear()
        user.role.can_view_sites.set([agent1.site])
        r = self.client.patch(f"{base_url}/")
        self.assertEqual(len(r.data), 7)  # type: ignore

        # test limiting to site and client: agent1's site + agent2's client = 10
        user.role.can_view_clients.set([agent2.client])
        r = self.client.patch(f"{base_url}/")
        self.assertEqual(len(r.data), 10)  # type: ignore

    @patch("alerts.models.Alert.delete", return_value=1)
    def test_edit_delete_get_alert_permissions(self, delete):
        """Detail endpoints (get/put/delete): exercise the full permission
        matrix — superuser, role-less user, role-holder, and client/site
        restricted role-holder — against alerts tied to each agent plus one
        agent-less custom alert (always visible)."""
        agent = baker.make_recipe("agents.agent")
        agent1 = baker.make_recipe("agents.agent")
        agent2 = baker.make_recipe("agents.agent")
        agents = [agent, agent1, agent2]
        checks = baker.make("checks.Check", agent=cycle(agents), _quantity=3)
        tasks = baker.make("autotasks.AutomatedTask", agent=cycle(agents), _quantity=3)
        alert_tasks = baker.make(
            "alerts.Alert", alert_type="task", assigned_task=cycle(tasks), _quantity=3
        )
        alert_checks = baker.make(
            "alerts.Alert",
            alert_type="check",
            assigned_check=cycle(checks),
            _quantity=3,
        )
        alert_agents = baker.make(
            "alerts.Alert", alert_type="availability", agent=cycle(agents), _quantity=3
        )
        alert_custom = baker.make("alerts.Alert", alert_type="custom", _quantity=4)

        # alert task url
        task_url = f"{base_url}/{alert_tasks[0].id}/"  # for agent
        unauthorized_task_url = f"{base_url}/{alert_tasks[1].id}/"  # for agent1

        # alert check url
        check_url = f"{base_url}/{alert_checks[0].id}/"  # for agent
        unauthorized_check_url = f"{base_url}/{alert_checks[1].id}/"  # for agent1

        # alert agent url
        agent_url = f"{base_url}/{alert_agents[0].id}/"  # for agent
        unauthorized_agent_url = f"{base_url}/{alert_agents[1].id}/"  # for agent1

        # custom alert url
        custom_url = f"{base_url}/{alert_custom[0].id}/"  # no agent associated

        authorized_urls = [task_url, check_url, agent_url, custom_url]
        unauthorized_urls = [
            unauthorized_agent_url,
            unauthorized_check_url,
            unauthorized_task_url,
        ]

        for method in ["get", "put", "delete"]:
            # test superuser access: everything allowed
            for url in authorized_urls:
                self.check_authorized_superuser(method, url)

            for url in unauthorized_urls:
                self.check_authorized_superuser(method, url)

            user = self.create_user_with_roles([])
            self.client.force_authenticate(user=user)  # type: ignore

            # test user without role: nothing allowed
            for url in authorized_urls:
                self.check_not_authorized(method, url)

            for url in unauthorized_urls:
                self.check_not_authorized(method, url)

            # add user to role and test
            # (get requires the list role, put/delete the manage role)
            setattr(
                user.role,
                "can_list_alerts" if method == "get" else "can_manage_alerts",
                True,
            )
            user.role.save()

            # test user with role: all alerts reachable
            for url in authorized_urls:
                self.check_authorized(method, url)

            for url in unauthorized_urls:
                self.check_authorized(method, url)

            # limit user to client: agent's alerts allowed, agent1's denied
            user.role.can_view_clients.set([agent.client])

            for url in authorized_urls:
                self.check_authorized(method, url)

            for url in unauthorized_urls:
                self.check_not_authorized(method, url)

            # additionally allow agent1's site: everything reachable again
            user.role.can_view_sites.set([agent1.site])

            for url in authorized_urls:
                self.check_authorized(method, url)

            for url in unauthorized_urls:
                self.check_authorized(method, url)
| 38.550032 | 116 | 0.6515 |
acdfedaa4324f87619eabcfa9fc01dee5ca9c613 | 34,337 | py | Python | preprocess_inputs/eval/spider/evaluate.py | VaheShelunts/Master_thesis | 353f9b9853084ad7eae9dc3462df68fd4d37c3e6 | [
"BSD-3-Clause"
] | null | null | null | preprocess_inputs/eval/spider/evaluate.py | VaheShelunts/Master_thesis | 353f9b9853084ad7eae9dc3462df68fd4d37c3e6 | [
"BSD-3-Clause"
] | null | null | null | preprocess_inputs/eval/spider/evaluate.py | VaheShelunts/Master_thesis | 353f9b9853084ad7eae9dc3462df68fd4d37c3e6 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
################################
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True

# SQL grammar vocabularies; the parsed SQL dicts store *indices* into these
# tuples (e.g. a cond_unit's op_id indexes WHERE_OPS), so order matters.
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')

WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
    'sql': "sql",
    'table_unit': "table_unit",
}

COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')

# SQL components used to grade query hardness (easy/medium/hard/extra)
HARDNESS = {
    "component1": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),
    "component2": ('except', 'union', 'intersect')
}

# Partial-match categories reported by the evaluator
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                 'group', 'order', 'and/or', 'IUEN', 'keywords']

# Paths to the Spider dataset, resolved relative to this file (4 levels up)
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
db_dir = os.path.join(root_dir, 'data/spider/database')
table = os.path.join(root_dir, 'data/spider/tables.json')
# evaluation type: exact-set match (as opposed to execution accuracy)
etype = 'match'
def build_foreign_key_map(entry):
    """Build a map from each column key to a canonical column of its FK group.

    ``entry`` is one schema dict from tables.json with keys
    ``column_names_original`` ([table_idx, col_name] pairs, -1 for ``*``),
    ``table_names_original`` and ``foreign_keys`` ([col_idx, col_idx] pairs).

    Columns connected (transitively) by foreign keys are merged into one
    group; every column in a group maps to the group's lowest-index column.
    Keys are normalized as ``__table.column__`` (lowercase) or ``__all__``.

    FIX: the original iterated ``foreign_key_list`` (always empty) when
    merging, leaving the transitive grouping dead and mapping raw pairs
    instead; restored the canonical Spider behavior.
    """
    cols_orig = entry["column_names_original"]
    tables_orig = entry["table_names_original"]

    # rebuild cols corresponding to idmap in Schema
    cols = []
    for col_orig in cols_orig:
        if col_orig[0] >= 0:
            t = tables_orig[col_orig[0]]
            c = col_orig[1]
            cols.append("__" + t.lower() + "." + c.lower() + "__")
        else:
            cols.append("__all__")

    def keyset_in_list(k1, k2, k_list):
        # return the existing group containing k1 or k2, else a new group
        for k_set in k_list:
            if k1 in k_set or k2 in k_set:
                return k_set
        new_k_set = set()
        k_list.append(new_k_set)
        return new_k_set

    # merge FK pairs into transitive groups
    foreign_key_list = []
    foreign_keys = entry["foreign_keys"]
    for fkey in foreign_keys:
        key1, key2 = fkey
        key_set = keyset_in_list(key1, key2, foreign_key_list)
        key_set.add(key1)
        key_set.add(key2)

    # map every column in a group to the group's minimum-index column
    foreign_key_map = {}
    for key_set in foreign_key_list:
        sorted_list = sorted(list(key_set))
        midx = sorted_list[0]
        for idx in sorted_list:
            foreign_key_map[cols[idx]] = cols[midx]

    return foreign_key_map
def build_foreign_key_map_from_json(table):
    """Load a tables.json file and return {db_id: foreign-key map} per schema."""
    with open(table) as fp:
        schemas = json.load(fp)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in schemas}
# foreign-key maps for every Spider database, keyed by db_id
# NOTE: reads tables.json at import time — importing this module requires
# the dataset to be present on disk
kmaps = build_foreign_key_map_from_json(table)
def condition_has_or(conds):
    """Return True if any conjunction token between cond_units is 'or'."""
    connectors = conds[1::2]
    return any(tok == 'or' for tok in connectors)
def condition_has_like(conds):
    """Return True if any cond_unit in the condition uses the LIKE operator."""
    like_id = WHERE_OPS.index('like')
    return any(cond_unit[1] == like_id for cond_unit in conds[::2])
def condition_has_sql(conds):
    """Return True if any cond_unit's comparison value is a nested SQL dict."""
    for cond_unit in conds[::2]:
        # val1/val2 sit at indices 3 and 4 of a cond_unit
        if any(val is not None and type(val) is dict for val in (cond_unit[3], cond_unit[4])):
            return True
    return False
def val_has_op(val_unit):
    """Return True if the val_unit applies an arithmetic unit operator."""
    none_id = UNIT_OPS.index('none')
    return val_unit[0] != none_id
def has_agg(unit):
    """Return True if the (agg_id, ...) unit applies an aggregation function."""
    none_id = AGG_OPS.index('none')
    return unit[0] != none_id
def accuracy(count, total):
    """Return 1 when every predicted component matched (count == total), else 0."""
    return 1 if count == total else 0
def recall(count, total):
    """Return 1 when every gold component was matched (count == total), else 0."""
    return 1 if count == total else 0
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    denom = acc + rec
    if denom == 0:
        return 0
    return 2. * acc * rec / denom
def get_scores(count, pred_total, label_total):
    """All-or-nothing (acc, rec, f1): 1s only on an exact component match."""
    if pred_total != label_total:
        return 0, 0, 0
    if count == pred_total:
        return 1, 1, 1
    return 0, 0, 0
def eval_sel(pred, label):
    """Score the SELECT clause.

    Returns (label_total, pred_total, exact_matches, matches_ignoring_agg).
    NOTE: consumes matched units from label['select'][1] in place.
    """
    pred_units = pred['select'][1]
    gold_units = label['select'][1]
    gold_cols = [unit[1] for unit in gold_units]
    pred_total = len(pred_units)
    label_total = len(gold_units)

    matched = matched_wo_agg = 0
    for unit in pred_units:
        if unit in gold_units:
            matched += 1
            gold_units.remove(unit)
        if unit[1] in gold_cols:
            matched_wo_agg += 1
            gold_cols.remove(unit[1])

    return label_total, pred_total, matched, matched_wo_agg
def eval_where(pred, label):
    """Score the WHERE clause (cond_units only, connectors skipped via [::2]).

    Returns (label_total, pred_total, exact_matches, matches_ignoring_agg);
    "ignoring agg" compares only the val_unit at cond_unit index 2.
    """
    pred_conds = list(pred['where'][::2])
    gold_conds = list(label['where'][::2])
    gold_vals = [unit[2] for unit in gold_conds]
    pred_total = len(pred_conds)
    label_total = len(gold_conds)

    matched = matched_wo_agg = 0
    for unit in pred_conds:
        if unit in gold_conds:
            matched += 1
            gold_conds.remove(unit)
        if unit[2] in gold_vals:
            matched_wo_agg += 1
            gold_vals.remove(unit[2])

    return label_total, pred_total, matched, matched_wo_agg
def eval_group(pred, label):
    """Compare GROUP BY columns (table qualifiers stripped).

    Returns (label_total, pred_total, cnt).
    """
    def base_name(col):
        # drop a leading "table." qualifier if present
        return col.split(".")[1] if "." in col else col

    pred_cols = [base_name(u[1]) for u in pred['groupBy']]
    gold_cols = [base_name(u[1]) for u in label['groupBy']]
    total_pred = len(pred_cols)
    total_gold = len(gold_cols)
    matched = 0
    for col in pred_cols:
        if col in gold_cols:
            matched += 1
            gold_cols.remove(col)
    return total_gold, total_pred, matched
def eval_having(pred, label):
    """Compare HAVING clauses (counted only when a GROUP BY is present).

    A match requires identical group-by column ids AND identical having trees.
    Returns (label_total, pred_total, cnt).
    """
    pred_total = 1 if len(pred['groupBy']) > 0 else 0
    label_total = 1 if len(label['groupBy']) > 0 else 0
    cnt = 0
    pred_cols = [u[1] for u in pred['groupBy']]
    gold_cols = [u[1] for u in label['groupBy']]
    if (pred_total == label_total == 1
            and pred_cols == gold_cols
            and pred['having'] == label['having']):
        cnt = 1
    return label_total, pred_total, cnt
def eval_order(pred, label):
    """Compare ORDER BY clauses; a match also needs LIMIT presence to agree.

    Returns (label_total, pred_total, cnt).
    """
    pred_total = int(len(pred['orderBy']) > 0)
    label_total = int(len(label['orderBy']) > 0)
    cnt = 0
    limits_agree = ((pred['limit'] is None and label['limit'] is None)
                    or (pred['limit'] is not None and label['limit'] is not None))
    if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return label_total, pred_total, cnt
def eval_and_or(pred, label):
    """Compare the sets of AND/OR connectors used in the WHERE clauses.

    Returns (1, 1, 1) on an exact set match, otherwise the two set sizes and 0.
    """
    pred_connectors = set(pred['where'][1::2])
    gold_connectors = set(label['where'][1::2])
    if pred_connectors == gold_connectors:
        return 1, 1, 1
    return len(pred_connectors), len(gold_connectors), 0
def get_nestedSQL(sql):
    """Collect every nested sub-query AST: subqueries used as condition values
    plus the INTERSECT/EXCEPT/UNION branches."""
    nested = []
    all_conds = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for cond_unit in all_conds:
        for val in (cond_unit[3], cond_unit[4]):
            if type(val) is dict:
                nested.append(val)
    for set_op in ('intersect', 'except', 'union'):
        if sql[set_op] is not None:
            nested.append(sql[set_op])
    return nested
def eval_nested(pred, label):
    """Score a nested sub-query pair; either side may be None (absent).

    Returns (label_total, pred_total, cnt) where cnt is the exact-match score
    when both sides are present.
    """
    pred_total = int(pred is not None)
    label_total = int(label is not None)
    cnt = 0
    if pred is not None and label is not None:
        cnt = Evaluator().eval_exact_match(pred, label)
    return label_total, pred_total, cnt
def eval_IUEN(pred, label):
    """Aggregate nested-query scores over INTERSECT, EXCEPT and UNION branches."""
    per_branch = [eval_nested(pred[key], label[key])
                  for key in ('intersect', 'except', 'union')]
    label_total = sum(t[0] for t in per_branch)
    pred_total = sum(t[1] for t in per_branch)
    cnt = sum(t[2] for t in per_branch)
    return label_total, pred_total, cnt
def get_keywords(sql):
    """Return the set of SQL keyword markers present in a parsed query AST."""
    keywords = set()
    # simple presence checks on whole clauses
    clause_flags = [
        ('where', len(sql['where']) > 0),
        ('group', len(sql['groupBy']) > 0),
        ('having', len(sql['having']) > 0),
        ('limit', sql['limit'] is not None),
        ('except', sql['except'] is not None),
        ('union', sql['union'] is not None),
        ('intersect', sql['intersect'] is not None),
    ]
    for keyword, present in clause_flags:
        if present:
            keywords.add(keyword)
    if len(sql['orderBy']) > 0:
        # keeps the original behavior of also recording the sort direction token
        keywords.add(sql['orderBy'][0])
        keywords.add('order')
    # 'or' connector anywhere in FROM/WHERE/HAVING conditions
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if 'or' in connectors:
        keywords.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    # NOT / IN / LIKE operators inside condition units
    if any(cond_unit[0] for cond_unit in cond_units):
        keywords.add('not')
    if any(cond_unit[1] == WHERE_OPS.index('in') for cond_unit in cond_units):
        keywords.add('in')
    if any(cond_unit[1] == WHERE_OPS.index('like') for cond_unit in cond_units):
        keywords.add('like')
    return keywords
def eval_keywords(pred, label):
    """Compare keyword sets of two queries; returns (label_total, pred_total, cnt)."""
    pred_keywords = get_keywords(pred)
    gold_keywords = get_keywords(label)
    # get_keywords returns sets, so the match count is the intersection size
    return len(gold_keywords), len(pred_keywords), len(pred_keywords & gold_keywords)
def count_agg(units):
    """Count how many units carry an aggregation function."""
    return sum(1 for unit in units if has_agg(unit))
def count_component1(sql):
    """Count structural components of a query (clauses, joins, ORs, LIKEs)."""
    clause_present = [
        len(sql['where']) > 0,
        len(sql['groupBy']) > 0,
        len(sql['orderBy']) > 0,
        sql['limit'] is not None,
    ]
    count = sum(clause_present)
    table_units = sql['from']['table_units']
    if len(table_units) > 0:
        # each table beyond the first implies a JOIN
        count += len(table_units) - 1
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += connectors.count('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    like_op = WHERE_OPS.index('like')
    count += sum(1 for cond_unit in cond_units if cond_unit[1] == like_op)
    return count
def count_component2(sql):
    """Number of nested sub-queries (IUEN branches plus subqueries in conditions)."""
    return len(get_nestedSQL(sql))
def count_others(sql):
    """Count 'other' complexity signals: multiple aggregations, select columns,
    where conditions or group-by clauses (one point each)."""
    # total number of aggregations across SELECT / WHERE / GROUP BY / ORDER BY / HAVING
    agg_total = count_agg(sql['select'][1])
    agg_total += count_agg(sql['where'][::2])
    agg_total += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = sql['orderBy'][1]
        agg_total += count_agg([u[1] for u in order_units if u[1]] +
                               [u[2] for u in order_units if u[2]])
    agg_total += count_agg(sql['having'])
    checks = [
        agg_total > 1,                 # more than one aggregation
        len(sql['select'][1]) > 1,     # more than one select column
        len(sql['where']) > 1,         # more than one where condition
        len(sql['groupBy']) > 1,       # more than one group-by clause
    ]
    return sum(checks)
class Evaluator:
    """Scores predicted SQL ASTs against gold ASTs (exact and partial match)."""
    def __init__(self):
        # Component-level scores from the most recent eval_partial_match call;
        # read by callers right after eval_exact_match.
        self.partial_scores = None
    def eval_hardness(self, sql, db_dir=None, db_name=None):
        """Classify a query as easy/medium/hard/extra.

        *sql* may be a parsed AST, or a raw SQL string — in the latter case
        db_dir/db_name are required so the schema can be loaded for parsing.
        """
        if isinstance(sql, str):
            assert(db_dir is not None and db_name is not None)
            db = os.path.join(db_dir, db_name, db_name + ".sqlite")
            schema = Schema(get_schema(db))
            # Strip table aliases the parser cannot handle before parsing.
            sql = get_sql(schema, sql.replace('AS T0', '').replace('AS t0', '').replace('as T0', '').replace('as t0', ''))
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        # Bucket thresholds over the three component counts.
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"
    def eval_exact_match(self, pred, label):
        """Return 1 iff every partial component has f1 == 1 and, when the gold
        query has FROM tables, the (sorted) table lists agree.

        Side effect: stores the component scores in self.partial_scores.
        """
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for _, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return int(label_tables == pred_tables)
        return 1
    def eval_partial_match(self, pred, label):
        """Score every query component separately; returns a dict keyed by
        component name, each value holding acc/rec/f1 and the two totals."""
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        return res
def isValidSQL(sql, db):
    """Return True iff *sql* executes without error against the SQLite DB at *db*.

    Fixes two defects in the original: the connection was never closed (a
    resource leak on every call), and a bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit. Any execution failure still yields False.
    """
    conn = sqlite3.connect(db)
    try:
        conn.cursor().execute(sql)
    except Exception:
        return False
    finally:
        conn.close()
    return True
def print_scores(scores, etype):
    """Pretty-print aggregate evaluation scores per hardness level.

    *etype* selects which sections to print: "exec", "match" or "all".
    """
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    # NOTE(review): the example counts are printed twice (above and again
    # below); this looks like leftover debugging output — confirm before removing.
    print('===================== EXAMPLE COUNT =====================')
    counts = [scores[level]['count'] for level in levels]
    print("{:<20} {:<20} {:<20} {:<20} {:<20} {:<20}".format("count", *counts))
    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps, in_execution_order=False, verbose=False):
    """Evaluate a prediction file against a gold file and print aggregate scores.

    Both files are read line-by-line (tab-separated; gold lines are
    ``sql\\tdb_id``). Returns the list of per-example result dicts.
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    evaluator = Evaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    # Fix: ``partial_types`` was referenced below but never defined in this
    # scope, raising NameError unless an identically named module global
    # happened to exist. Define it explicitly (same list print_scores uses).
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    for i, (p, g) in enumerate(zip(plist, glist)):
        # NOTE(review): evaluate_single_query also reads db_dir/etype/kmaps from
        # global scope rather than from these parameters — confirm callers set them.
        results, eval_err = evaluate_single_query(p, g,
                                                  evaluator=evaluator,
                                                  scores=scores,
                                                  in_execution_order=in_execution_order,
                                                  verbose=verbose)
        entries.append(results)
        if eval_err:
            eval_err_num += 1
    print("eval_err_num:{}".format(eval_err_num))
    # Normalize accumulated sums into averages per hardness level.
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                                                             scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                                                             scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    # NOTE(review): f1 is set to 1 when both acc and rec are 0 —
                    # preserved from the original behavior; confirm it is intended.
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                            scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
    return entries
def evaluate_single_query_with_multiple_ground_truths(p, g_list, hardness=None, evaluator=None, scores=None,
                                                      in_execution_order=False, verbose=False):
    """Evaluate one prediction against several acceptable gold queries.

    Returns (matched, hardness, table_err): True on the first exact match, or
    False with the stats of the last gold query tried when none match.
    NOTE(review): if g_list is empty, ``results`` stays None and the final
    return raises TypeError — confirm callers always pass a non-empty list.
    """
    results = None
    for g in g_list:
        results = evaluate_single_query(p, g,
                                        hardness=hardness,
                                        evaluator=evaluator,
                                        scores=scores,
                                        in_execution_order=in_execution_order,
                                        verbose=verbose)[0]
        if results['exact'] == 1:
            return True, results['hardness'], results['table_err']
    return False, results['hardness'], results['table_err']
def evaluate_single_query(p, g, hardness=None, evaluator=None, scores=None, in_execution_order=False, verbose=False):
    """Evaluate one predicted query against one gold (sql, db_id) pair.

    Returns (result_dict, eval_err) where eval_err is True when the predicted
    SQL could not be parsed. When *scores* is given, its per-level counters
    are updated in place.
    NOTE(review): this function reads db_dir, etype, kmaps and partial_types
    from module/global scope (set under __main__) — confirm they are defined
    before calling it directly.
    """
    if evaluator is None:
        evaluator = Evaluator()
    # A prediction may arrive as a one-element list (from a TSV split) or a string.
    if isinstance(p, list):
        p_str = p[0]
    else:
        p_str = p
    g_str, db = g
    db_name = db
    db = os.path.join(db_dir, db, db + ".sqlite")
    schema = Schema(get_schema(db))
    # Strip the "AS T0"/"as t0" aliases the parser cannot handle.
    g_sql = get_sql(schema, g_str.replace('AS T0', '').replace('AS t0', '').replace('as T0', '').replace('as t0', ''))
    if hardness is None:
        hardness = evaluator.eval_hardness(g_sql)
    if scores:
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1
    eval_err = False
    if isinstance(p_str, str):
        # Unparsable predictions fall back to an empty AST (eval_err becomes True).
        p_sql, eval_err = get_sql_with_default(p_str, schema, in_execution_order)
    else:
        p_sql = p_str
    p_tables, g_tables = set(), set()
    get_table_recur(p_sql, p_tables)
    get_table_recur(g_sql, g_tables)
    # rebuild sql for value evaluation
    kmap = kmaps[db_name]
    g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
    g_sql = rebuild_sql_val(g_sql)
    g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
    p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
    p_sql = rebuild_sql_val(p_sql)
    p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
    if etype in ["all", "exec"]:
        exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
        if scores and exec_score:
            scores[hardness]['exec'] += 1
            scores['all']['exec'] += 1
    # NOTE(review): when etype == "exec", exact_score/partial_scores below are
    # never assigned, so the return statement raises NameError — confirm this
    # function is only ever called with etype in {"all", "match"}.
    if etype in ["all", "match"]:
        exact_score = evaluator.eval_exact_match(p_sql, g_sql)
        partial_scores = evaluator.partial_scores
        if exact_score == 0 and verbose:
            print("{} pred: {}".format(hardness, p_str))
            print("{} gold: {}".format(hardness, g_str))
            print("")
        if scores:
            # Accumulate both the per-hardness bucket and the 'all' bucket.
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            for type_ in partial_types:
                if partial_scores[type_]['pred_total'] > 0:
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
    return {
        'predictSQL': p_str,
        'goldSQL': g_str,
        'hardness': hardness,
        'exact': exact_score,
        'partial': partial_scores,
        'table_err': len(p_tables) - len(g_tables)
    }, eval_err
def get_sql_with_default(sql, schema, in_execution_order):
    """Parse *sql* into an AST; on any failure return an empty AST and True.

    The empty skeleton lets evaluation proceed (and score 0) even when a
    predicted query cannot be parsed.
    """
    try:
        return get_sql(schema, sql, in_execution_order), False
    except Exception:
        empty_ast = {
            "except": None,
            "from": {
                "conds": [],
                "table_units": []
            },
            "groupBy": [],
            "having": [],
            "intersect": None,
            "limit": None,
            "orderBy": [],
            "select": [
                False,
                []
            ],
            "union": None,
            "where": []
        }
        return empty_ast, True
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    Fix: the sqlite connection was previously leaked on every call; it is now
    always closed. A failing prediction still returns False; a failing gold
    query still propagates (it indicates a data problem, not a bad prediction).
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # An invalid prediction can never be an execution match.
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        conn.close()

    def res_map(res, val_units):
        # Map each selected expression to its column of values so column order
        # differences between the two queries do not matter.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
    """When DISABLE_VALUE is set, blank out literal values in a condition unit,
    recursing into values that are nested SQL ASTs."""
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    val1 = rebuild_sql_val(val1) if type(val1) is dict else None
    val2 = rebuild_sql_val(val2) if type(val2) is dict else None
    return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
    """Apply rebuild_cond_unit_val to every condition unit in a condition list.

    Even indices hold condition units; odd indices hold 'and'/'or' connectors.
    """
    if condition is None or not DISABLE_VALUE:
        return condition
    return [rebuild_cond_unit_val(item) if pos % 2 == 0 else item
            for pos, item in enumerate(condition)]
def rebuild_sql_val(sql):
    """Recursively blank out literal values in a SQL AST (when DISABLE_VALUE).

    Mutates *sql* in place and returns it.
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for cond_key in ('having', 'where'):
        sql[cond_key] = rebuild_condition_val(sql[cond_key])
    for set_key in ('intersect', 'except', 'union'):
        sql[set_key] = rebuild_sql_val(sql[set_key])
    return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
    """Return schema column ids belonging to the tables referenced in FROM."""
    table_ids = [tu[1] for tu in table_units if tu[0] == TABLE_TYPE['table_unit']]
    # table ids carry a 2-character suffix; strip it to get the table prefix
    prefixes = [tid[:-2] for tid in table_ids]
    return [col for col in schema.idMap.values()
            if '.' in col and col[:col.index('.')] in prefixes]
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Canonicalize one column unit: map a foreign-key column to its
    primary-key counterpart, and drop DISTINCT when DISABLE_DISTINCT is set."""
    if col_unit is None:
        return col_unit
    agg_id, col_id, distinct = col_unit
    mapped = kmap[col_id] if (col_id in kmap and col_id in valid_col_units) else col_id
    return agg_id, mapped, (None if DISABLE_DISTINCT else distinct)
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Canonicalize both column units inside a value unit."""
    if val_unit is None:
        return val_unit
    op, first, second = val_unit
    return (op,
            rebuild_col_unit_col(valid_col_units, first, kmap),
            rebuild_col_unit_col(valid_col_units, second, kmap))
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Canonicalize a table unit; only tuple payloads (column units) are rebuilt."""
    if table_unit is None:
        return table_unit
    table_type, payload = table_unit
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return table_type, payload
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Canonicalize the value unit of a condition unit; values are untouched."""
    if cond_unit is None:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    rebuilt_val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    return not_op, op_id, rebuilt_val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Canonicalize every condition unit in a condition list, in place.

    Units sit at even indices; odd indices (connectors) are left alone.
    """
    for pos in range(0, len(condition), 2):
        condition[pos] = rebuild_cond_unit_col(valid_col_units, condition[pos], kmap)
    return condition
def rebuild_select_col(valid_col_units, sel, kmap):
    """Canonicalize a SELECT clause: rebuild each value unit and drop the
    DISTINCT flag when DISABLE_DISTINCT is set."""
    if sel is None:
        return sel
    distinct, units = sel
    rebuilt = [(agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap))
               for agg_id, val_unit in units]
    return (None if DISABLE_DISTINCT else distinct), rebuilt
def rebuild_from_col(valid_col_units, from_, kmap):
    """Canonicalize a FROM clause (table units and join conditions), in place."""
    if from_ is None:
        return from_
    from_['table_units'] = [rebuild_table_unit_col(valid_col_units, tu, kmap)
                            for tu in from_['table_units']]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Canonicalize every column unit in a GROUP BY clause."""
    if group_by is None:
        return group_by
    rebuilt = []
    for col_unit in group_by:
        rebuilt.append(rebuild_col_unit_col(valid_col_units, col_unit, kmap))
    return rebuilt
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Canonicalize the value units of an ORDER BY clause; direction is kept."""
    if not order_by:
        # None or empty clause: nothing to rebuild
        return order_by
    direction, units = order_by
    return direction, [rebuild_val_unit_col(valid_col_units, u, kmap) for u in units]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Recursively canonicalize column references across a whole SQL AST.

    Mutates *sql* in place and returns it.
    """
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    for cond_key in ('where', 'having'):
        sql[cond_key] = rebuild_condition_col(valid_col_units, sql[cond_key], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    for set_key in ('intersect', 'except', 'union'):
        sql[set_key] = rebuild_sql_col(valid_col_units, sql[set_key], kmap)
    return sql
def test_evaluate_single_query():
    """Ad-hoc smoke test for evaluate_single_query.

    NOTE(review): relies on the module-level globals (db_dir, etype, kmaps)
    configured under __main__, so it only works after those are set.
    """
    p = 'SELECT shop.Name FROM hiring JOIN shop ON hiring.Employee_ID = hiring.Shop_ID GROUP BY hiring.Shop_ID ORDER BY COUNT(*) DESC LIMIT 1'
    g = ('SELECT shop.Name FROM shop JOIN hiring ON shop.Shop_ID = hiring.Shop_ID GROUP BY hiring.Shop_ID ORDER BY COUNT(*) DESC LIMIT 1',
         'employee_hire_evaluation')
    print(evaluate_single_query(p, g))
if __name__ == "__main__":
    # CLI entry point: evaluate predicted SQL against gold SQL over a set of
    # SQLite databases, then dump per-example exact-match scores.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gold', dest='gold', type=str)
    parser.add_argument('--pred', dest='pred', type=str)
    parser.add_argument('--db', dest='db', type=str)
    parser.add_argument('--table', dest='table', type=str)
    parser.add_argument('--etype', dest='etype', type=str)
    parser.add_argument('--in_execution_order', dest='in_execution_order', action='store_true')
    args = parser.parse_args()
    gold = args.gold
    pred = args.pred
    # db_dir/etype/kmaps are read as globals by evaluate_single_query().
    db_dir = args.db
    table = args.table
    etype = args.etype
    assert etype in ["all", "exec", "match"], "Unknown evaluation method"
    kmaps = build_foreign_key_map_from_json(table)
    entries = evaluate(gold, pred, db_dir, etype, kmaps, args.in_execution_order, verbose=True)
    # Persist per-example exact-match scores next to the prediction file.
    out_scores = pred.replace('.txt', '.exact.scores')
    with open(out_scores, 'w') as o_f:
        for results in entries:
            o_f.write('{}\n'.format(results['exact']))
    print('exact match scores saved to {}'.format(out_scores))
    # test_evaluate_single_query()
acdfee0ae9990895a9906d8773ee51c98f834f0e | 8,256 | py | Python | setup.py | kamronald/pymatgen | 20a6e87f26d34171a98481066ed876447fdd6ac3 | [
"MIT"
] | null | null | null | setup.py | kamronald/pymatgen | 20a6e87f26d34171a98481066ed876447fdd6ac3 | [
"MIT"
] | null | null | null | setup.py | kamronald/pymatgen | 20a6e87f26d34171a98481066ed876447fdd6ac3 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
    """setuptools build_ext that defers the numpy include-dir lookup to build
    time, so numpy only needs to be installed when extensions are compiled."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        import builtins
        if hasattr(builtins, '__NUMPY_SETUP__'):
            del builtins.__NUMPY_SETUP__
        import importlib
        import numpy
        importlib.reload(numpy)
        self.include_dirs.append(numpy.get_include())
extra_link_args = []
# On 64-bit Windows builds, allow duplicate symbol definitions when linking
# the C extensions (MinGW-style linker workaround).
if sys.platform.startswith('win') and platform.machine().endswith('64'):
    extra_link_args.append('-Wl,--allow-multiple-definition')
# Long description rendered as markdown on PyPI.
long_desc = """
Official docs: [http://pymatgen.org](http://pymatgen.org/)
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for
[VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/),
CIF, Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's [Github page]
(https://github.com/materialsproject/pymatgen). For help with any pymatgen
issues, please use the [Discourse page](https://pymatgen.discourse.group).
Why use pymatgen?
=================
There are many materials analysis codes out there, both commerical and free,
but pymatgen offer several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the [Materials Project](https://www.materialsproject.org).
The analysis it produces survives rigorous scrutiny every single day. Bugs
tend to be found and corrected quickly. Pymatgen also uses
[CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/)
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests.
2. **It is well documented.** A fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org),
the ABINIT group and many other research groups.
With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users
who require Python 2.7 should install pymatgen v2018.x.
"""
# Package metadata, dependencies and C-extension build configuration.
setup(
    name="pymatgen",
    packages=find_packages(),
    version="2019.8.23",
    # Defer numpy include-dir discovery to build time (see build_ext above).
    cmdclass={'build_ext': build_ext},
    setup_requires=['numpy>=1.14.3', 'setuptools>=18.0'],
    python_requires='>=3.6',
    install_requires=["numpy>=1.14.3", "requests", "ruamel.yaml>=0.15.6",
                      "monty>=2.0.6", "scipy>=1.0.1", "pydispatcher>=2.0.5",
                      "tabulate", "spglib>=1.9.9.44", "networkx>=2.2",
                      "matplotlib>=1.5", "palettable>=3.1.1", "sympy", "pandas"],
    extras_require={
        "provenance": ["pybtex"],
        "ase": ["ase>=3.3"],
        "vis": ["vtk>=6.0.0"],
        "abinit": ["apscheduler", "netcdf4"],
        ':python_version < "3.7"': [
            "dataclasses>=0.6",
        ]},
    # Non-Python data files shipped inside the package.
    package_data={
        "pymatgen.core": ["*.json"],
        "pymatgen.analysis": ["*.yaml", "*.json"],
        "pymatgen.analysis.cost": ["*.csv"],
        "pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
        "pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
        "pymatgen.analysis.hhi": ["*.csv"],
        "pymatgen.analysis.magnetism": ["*.json", "*.yaml"],
        "pymatgen.analysis.structure_prediction": ["data/*.json", "*.yaml"],
        "pymatgen.io": ["*.yaml"],
        "pymatgen.io.vasp": ["*.yaml"],
        "pymatgen.io.lammps": ["templates/*.*"],
        "pymatgen.io.feff": ["*.yaml"],
        "pymatgen.symmetry": ["*.yaml", "*.json", "*.sqlite"],
        "pymatgen.entries": ["*.yaml"],
        "pymatgen.vis": ["ElementColorSchemes.yaml"],
        "pymatgen.command_line": ["OxideTersoffPotentials"],
        "pymatgen.analysis.defects": ["*.json"],
        "pymatgen.analysis.diffraction": ["*.json"],
        "pymatgen.util": ["structures/*.json"]},
    author="Pymatgen Development Team",
    author_email="ongsp@eng.ucsd.edu",
    maintainer="Shyue Ping Ong, Matthew Horton",
    maintainer_email="ongsp@eng.ucsd.edu, mkhorton@lbl.gov",
    url="http://www.pymatgen.org",
    license="MIT",
    description="Python Materials Genomics is a robust materials "
                "analysis code that defines core object representations for "
                "structures and molecules with support for many electronic "
                "structure codes. It is currently the core analysis code "
                "powering the Materials Project "
                "(https://www.materialsproject.org).",
    long_description=long_desc,
    long_description_content_type='text/markdown',
    keywords=["VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science",
              "project", "electronic", "structure", "analysis", "phase", "diagrams",
              "crystal"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    # Pre-generated C sources (from Cython) compiled as extensions.
    ext_modules=[Extension("pymatgen.optimization.linear_assignment",
                           ["pymatgen/optimization/linear_assignment.c"],
                           extra_link_args=extra_link_args),
                 Extension("pymatgen.util.coord_cython",
                           ["pymatgen/util/coord_cython.c"],
                           extra_link_args=extra_link_args)],
    # Command-line tools installed with the package.
    entry_points={
        'console_scripts': [
            'pmg = pymatgen.cli.pmg:main',
            'feff_input_generation = pymatgen.cli.feff_input_generation:main',
            'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
            'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
            'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
            'get_environment = pymatgen.cli.get_environment:main',
        ]
    }
)
| 47.722543 | 113 | 0.666788 |
acdfeef7888cb179d93cf35ee22d3623961cd5c4 | 9,512 | py | Python | DeepLearning/ProjectMain/Project_v1.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | DeepLearning/ProjectMain/Project_v1.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | DeepLearning/ProjectMain/Project_v1.py | SeanSyue/TensorflowReferences | 2c93f4c770e2713ef4769f287e022d03e7097188 | [
"MIT"
] | null | null | null | import tensorflow as tf
# TRAIN_FILE = 'C:/bank/data_set/bank_dummy_train.csv'
# TEST_FILE = 'C:/bank/data_set/bank_dummy_test.csv'
# FEATURE_NUMBER = 56
# TRAIN_FILE = 'C:/bank/data_set/bank_train_duration_dropped.csv'
# TEST_FILE = 'C:/bank/data_set/bank_test_duration_dropped.csv'
# FEATURE_NUMBER = 64
TRAIN_FILE = 'C:/bank/data_set/bank_train_up.csv'
TEST_FILE = 'C:/bank/data_set/bank_test.csv'
FEATURE_NUMBER = 65
MODEL_FILEPATH = "C:/bank/checkpoint/"
ITERATION = 10000
LEARNING_RATE = 1e-7
LABEL_FACTOR = 30
DIFF_FACTOR = 70
# SUMMARY_DIRECTORY = 'C:/Users/Sean/Desktop/test2/'
SUMMARY_DIRECTORY = 'C:/bank/summary/'
def pre_processing(trn_file, tst_file):
# Loading data.
trn_que = tf.train.string_input_producer([trn_file])
tst_que = tf.train.string_input_producer([tst_file])
reader = tf.TextLineReader(skip_header_lines=1)
# Decode csv files
_, trn_value = reader.read(trn_que)
trn_dec = tf.decode_csv(trn_value, record_defaults=[[0.0]]*(FEATURE_NUMBER+1), name='decode_train')
_, tst_value = reader.read(tst_que)
tst_dec = tf.decode_csv(tst_value, record_defaults=[[0.0]] * (FEATURE_NUMBER + 1), name='decode_test')
# Read in training and testing set.
trn_ftr = trn_dec[:-1]
trn_lbl = trn_dec[-1]
tst_ftr = tst_dec[:-1]
tst_lbl = tst_dec[-1]
# Reshape tensors
x_train = tf.reshape(trn_ftr, [-1, FEATURE_NUMBER], name='x_train')
y_train = tf.reshape(trn_lbl*LABEL_FACTOR, [-1, 1], name='y_train')
x_test = tf.reshape(tst_ftr, [-1, FEATURE_NUMBER], name='x_test')
y_test = tf.reshape(tst_lbl*LABEL_FACTOR, [-1, 1], name='y_test')
return x_train, y_train, x_test, y_test
class DeepNN:
@staticmethod
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
layer_name = 'layer%s' % n_layer
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
Weights = tf.Variable(tf.truncated_normal([in_size, out_size]), name='W')
tf.summary.histogram(layer_name + '/weights', Weights)
with tf.name_scope('biases'):
initial_b = tf.zeros([1, out_size]) + 0.6
biases = tf.Variable(initial_b, dtype=tf.float32, name='b')
tf.summary.histogram(layer_name + '/biases', biases)
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
tf.summary.histogram(layer_name + '/outputs', outputs)
return outputs
@staticmethod
def produce_placeholder(node_list):
with tf.name_scope('inputs'):
x = tf.placeholder(tf.float32, [None, node_list[0]], name='x_input')
y = tf.placeholder(tf.float32, [None, 1], name='y_input')
return x, y
@staticmethod
def deep_1hidden(node_list):
x, y = DeepNN.produce_placeholder(node_list)
# add hidden layer
l1 = DeepNN.add_layer(x, node_list[0], node_list[1], n_layer=1, activation_function=tf.nn.sigmoid)
# add output layer
y_ = DeepNN.add_layer(l1, node_list[1], node_list[2], n_layer='_out')
return x, y, y_
@staticmethod
def deep_2hidden(node_list):
x, y = DeepNN.produce_placeholder(node_list)
# add hidden layer
l1 = DeepNN.add_layer(x, node_list[0], node_list[1], n_layer=1, activation_function=tf.nn.relu)
l2 = DeepNN.add_layer(l1, node_list[1], node_list[2], n_layer=2, activation_function=tf.nn.relu)
# add output layer
y_ = DeepNN.add_layer(l2, node_list[2], node_list[3], n_layer='_out')
return x, y, y_
@staticmethod
def deep_4hidden(node_list):
x, y = DeepNN.produce_placeholder(node_list)
# add hidden layer
l1 = DeepNN.add_layer(x, node_list[0], node_list[1], n_layer=1, activation_function=tf.nn.relu)
l2 = DeepNN.add_layer(l1, node_list[1], node_list[2], n_layer=2, activation_function=tf.nn.relu)
l3 = DeepNN.add_layer(l2, node_list[2], node_list[3], n_layer=3, activation_function=tf.nn.relu)
l4 = DeepNN.add_layer(l3, node_list[3], node_list[4], n_layer=4, activation_function=tf.nn.relu)
# add output layer
y_ = DeepNN.add_layer(l4, node_list[4], node_list[5], n_layer='_out')
return x, y, y_
@staticmethod
def deep_6hidden(node_list):
x, y = DeepNN.produce_placeholder(node_list)
# add hidden layer
l1 = DeepNN.add_layer(x, node_list[0], node_list[1], n_layer=1, activation_function=tf.nn.relu)
l2 = DeepNN.add_layer(l1, node_list[1], node_list[2], n_layer=2, activation_function=tf.nn.relu)
l3 = DeepNN.add_layer(l2, node_list[2], node_list[3], n_layer=3, activation_function=tf.nn.relu)
l4 = DeepNN.add_layer(l3, node_list[3], node_list[4], n_layer=4, activation_function=tf.nn.relu)
l5 = DeepNN.add_layer(l4, node_list[4], node_list[5], n_layer=5, activation_function=tf.nn.relu)
l6 = DeepNN.add_layer(l5, node_list[5], node_list[6], n_layer=6, activation_function=tf.nn.relu)
# add output layer
y_ = DeepNN.add_layer(l6, node_list[6], node_list[7], n_layer='_out')
return x, y, y_
def assign_measurement(y_, y):
# Define cost function
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_)*DIFF_FACTOR, reduction_indices=[1]))
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y)
# loss = tf.reduce_mean(cross_entropy)
# Train step
with tf.name_scope('train_step'):
train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
# Define accuracy
with tf.name_scope('accuracy'):
correct_predictions = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
tf.summary.scalar('accuracy:', accuracy)
print(tf.shape(accuracy))
return loss, train_step, accuracy
class SummaryManager:
def __init__(self, session, summary):
self.sess = session
self.summ = summary
self.train_writer = None
self.test_writer = None
def assign_writers(self, location_list):
self.train_writer = tf.summary.FileWriter(location_list[0], self.sess.graph)
self.test_writer = tf.summary.FileWriter(location_list[1], self.sess.graph)
def write_summary(self, train_feed, test_feed, i):
train_result = self.sess.run(self.summ, feed_dict=train_feed)
test_result = self.sess.run(self.summ, feed_dict=test_feed)
self.train_writer.add_summary(train_result, i)
self.test_writer.add_summary(test_result, i)
class ThreadManager:
def __init__(self):
self.coord = None
self.threads = None
def assign_threads(self):
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(coord=self.coord)
def join_threads(self):
self.coord.request_stop()
self.coord.join(self.threads)
def main():
# Prepare features and labels
x_train, y_train, x_test, y_test = pre_processing(TRAIN_FILE, TEST_FILE)
x, y, y_ = DeepNN.deep_1hidden((FEATURE_NUMBER, 32, 1))
# x, y, y_ = DeepNN.deep_2hidden((FEATURE_NUMBER, FEATURE_NUMBER, 32, 1))
# x, y, y_ = DeepNN.deep_4hidden((FEATURE_NUMBER, 84, 54, 27, 10, 1))
# x, y, y_ = DeepNN.deep_6hidden((FEATURE_NUMBER, FEATURE_NUMBER, FEATURE_NUMBER, 132, 84, 48, 24, 1))
# Assign evaluation methods
loss, train_step, accuracy = assign_measurement(y_, y)
saver = tf.train.Saver()
summ = tf.summary.merge_all()
with tf.Session() as sess:
summ_mgr = SummaryManager(sess, summ)
thrd_mgr = ThreadManager()
summ_mgr.assign_writers((SUMMARY_DIRECTORY+'train/', SUMMARY_DIRECTORY+'predict/'))
thrd_mgr.assign_threads()
# Assign instances for feeding.
train_feed = {x: x_train.eval(), y: y_train.eval()}
test_feed = {x: x_test.eval(), y: y_test.eval()}
# Initialize all variables.
sess.run(tf.global_variables_initializer())
# Train loop
for i in range(ITERATION + 1):
sess.run(train_step, feed_dict=train_feed)
if i % 100 == 0:
# Write summary and show loss.
summ_mgr.write_summary(train_feed, test_feed, i)
# print(f"step {i}\n accuracy: ", sess.run(accuracy, feed_dict=test_feed))
print(f"step {i}")
print(" accuracy:", sess.run(accuracy, feed_dict=train_feed))
# print("features:", x_train.eval())
# print("labels:", y_train.eval())
# print("type:", y_train.dtype)
# print("type(y)", y.dtype.as_numpy_dtype)
# print("type(y_)", y_.dtype.as_numpy_dtype)
if i % 1000 == 0:
saver.save(sess, MODEL_FILEPATH+'test2')
thrd_mgr.join_threads()
print("---------------------------\ntensorboard --logdir=", SUMMARY_DIRECTORY)
if __name__ == '__main__':
main()
| 39.305785 | 107 | 0.631623 |
acdff0c0aeb114399011095c93f89fbaccee0abd | 1,490 | py | Python | data/external/repositories_2to3/156296/kaggle_otto-master/otto/otto_utils/blender.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/156296/kaggle_otto-master/otto/otto_utils/blender.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/156296/kaggle_otto-master/otto/otto_utils/blender.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import numpy as np
import os
from scipy.optimize import fmin_cobyla
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
from . import consts
from . import utils
def blended(c, x):
result = None
for i in range(len(c)):
result = result + c[i] * x[i] if result is not None else c[i] * x[i]
result /= sum(c)
return result
def error(p, x, y):
preds = blended(p, x)
err = log_loss(y, preds)
return err
def constraint(p, *args):
return min(p) - .0
def get_weights():
# Read validation labels
_, labels, _, _, _ = utils.load_data()
skf = StratifiedKFold(labels, n_folds=5, random_state=23)
test_index = None
for _, test_idx in skf:
test_index = np.append(test_index, test_idx) if test_index is not None else test_idx
val_labels = labels[test_index]
# Read predictions on validation set
val_predictions = []
prediction_files = utils.get_prediction_files()
for preds_file in prediction_files:
vp = np.genfromtxt(os.path.join(consts.BLEND_PATH, preds_file), delimiter=',')
val_predictions.append(vp)
# Minimize blending function
p0 = [1.] * len(prediction_files)
p = fmin_cobyla(error, p0, args=(val_predictions, val_labels), cons=[constraint], rhoend=1e-5)
return p
if __name__ == '__main__':
weights = get_weights()
print(weights)
print(weights / np.sum(weights)) | 27.592593 | 99 | 0.65302 |
acdff253917927243d15018725c0c2083fc6a3b0 | 46,351 | py | Python | python/pyarrow/tests/test_fs.py | shollyman/arrow | bf0f6aafc81e9c0d86b015607b8a637f9c4136eb | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 1 | 2021-06-17T01:11:33.000Z | 2021-06-17T01:11:33.000Z | python/pyarrow/tests/test_fs.py | royalstream/arrow | eb20a3dbc7732f612e5ce54be5f4291440829350 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | null | null | null | python/pyarrow/tests/test_fs.py | royalstream/arrow | eb20a3dbc7732f612e5ce54be5f4291440829350 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timezone, timedelta
import gzip
import os
import pathlib
import pickle
import sys
import pytest
import weakref
import pyarrow as pa
from pyarrow.tests.test_io import assert_file_not_found
from pyarrow.vendored.version import Version
from pyarrow.fs import (FileType, FileInfo, FileSelector, FileSystem,
LocalFileSystem, SubTreeFileSystem, _MockFileSystem,
FileSystemHandler, PyFileSystem, FSSpecHandler)
class DummyHandler(FileSystemHandler):
def __init__(self, value=42):
self._value = value
def __eq__(self, other):
if isinstance(other, FileSystemHandler):
return self._value == other._value
return NotImplemented
def __ne__(self, other):
if isinstance(other, FileSystemHandler):
return self._value != other._value
return NotImplemented
def get_type_name(self):
return "dummy"
def normalize_path(self, path):
return path
def get_file_info(self, paths):
info = []
for path in paths:
if "file" in path:
info.append(FileInfo(path, FileType.File))
elif "dir" in path:
info.append(FileInfo(path, FileType.Directory))
elif "notfound" in path:
info.append(FileInfo(path, FileType.NotFound))
elif "badtype" in path:
# Will raise when converting
info.append(object())
else:
raise IOError
return info
def get_file_info_selector(self, selector):
if selector.base_dir != "somedir":
if selector.allow_not_found:
return []
else:
raise FileNotFoundError(selector.base_dir)
infos = [
FileInfo("somedir/file1", FileType.File, size=123),
FileInfo("somedir/subdir1", FileType.Directory),
]
if selector.recursive:
infos += [
FileInfo("somedir/subdir1/file2", FileType.File, size=456),
]
return infos
def create_dir(self, path, recursive):
if path == "recursive":
assert recursive is True
elif path == "non-recursive":
assert recursive is False
else:
raise IOError
def delete_dir(self, path):
assert path == "delete_dir"
def delete_dir_contents(self, path):
if not path.strip("/"):
raise ValueError
assert path == "delete_dir_contents"
def delete_root_dir_contents(self):
pass
def delete_file(self, path):
assert path == "delete_file"
def move(self, src, dest):
assert src == "move_from"
assert dest == "move_to"
def copy_file(self, src, dest):
assert src == "copy_file_from"
assert dest == "copy_file_to"
def open_input_stream(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
data = "{0}:input_stream".format(path).encode('utf8')
return pa.BufferReader(data)
def open_input_file(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
data = "{0}:input_file".format(path).encode('utf8')
return pa.BufferReader(data)
def open_output_stream(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
return pa.BufferOutputStream()
def open_append_stream(self, path):
if "notfound" in path:
raise FileNotFoundError(path)
return pa.BufferOutputStream()
class ProxyHandler(FileSystemHandler):
def __init__(self, fs):
self._fs = fs
def __eq__(self, other):
if isinstance(other, ProxyHandler):
return self._fs == other._fs
return NotImplemented
def __ne__(self, other):
if isinstance(other, ProxyHandler):
return self._fs != other._fs
return NotImplemented
def get_type_name(self):
return "proxy::" + self._fs.type_name
def normalize_path(self, path):
return self._fs.normalize_path(path)
def get_file_info(self, paths):
return self._fs.get_file_info(paths)
def get_file_info_selector(self, selector):
return self._fs.get_file_info(selector)
def create_dir(self, path, recursive):
return self._fs.create_dir(path, recursive=recursive)
def delete_dir(self, path):
return self._fs.delete_dir(path)
def delete_dir_contents(self, path):
return self._fs.delete_dir_contents(path)
def delete_root_dir_contents(self):
return self._fs.delete_dir_contents("", accept_root_dir=True)
def delete_file(self, path):
return self._fs.delete_file(path)
def move(self, src, dest):
return self._fs.move(src, dest)
def copy_file(self, src, dest):
return self._fs.copy_file(src, dest)
def open_input_stream(self, path):
return self._fs.open_input_stream(path)
def open_input_file(self, path):
return self._fs.open_input_file(path)
def open_output_stream(self, path):
return self._fs.open_output_stream(path)
def open_append_stream(self, path):
return self._fs.open_append_stream(path)
@pytest.fixture
def localfs(request, tempdir):
return dict(
fs=LocalFileSystem(),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_localfs(request, tempdir):
return dict(
fs=PyFileSystem(ProxyHandler(LocalFileSystem())),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def mockfs(request):
return dict(
fs=_MockFileSystem(),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_mockfs(request):
return dict(
fs=PyFileSystem(ProxyHandler(_MockFileSystem())),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def localfs_with_mmap(request, tempdir):
return dict(
fs=LocalFileSystem(use_mmap=True),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def subtree_localfs(request, tempdir, localfs):
return dict(
fs=SubTreeFileSystem(str(tempdir), localfs['fs']),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def s3fs(request, s3_connection, s3_server):
request.config.pyarrow.requires('s3')
from pyarrow.fs import S3FileSystem
host, port, access_key, secret_key = s3_connection
bucket = 'pyarrow-filesystem/'
fs = S3FileSystem(
access_key=access_key,
secret_key=secret_key,
endpoint_override='{}:{}'.format(host, port),
scheme='http'
)
fs.create_dir(bucket)
yield dict(
fs=fs,
pathfn=bucket.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=False,
)
fs.delete_dir(bucket)
@pytest.fixture
def subtree_s3fs(request, s3fs):
prefix = 'pyarrow-filesystem/prefix/'
return dict(
fs=SubTreeFileSystem(prefix, s3fs['fs']),
pathfn=prefix.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=False,
)
@pytest.fixture
def hdfs(request, hdfs_connection):
request.config.pyarrow.requires('hdfs')
if not pa.have_libhdfs():
pytest.skip('Cannot locate libhdfs')
from pyarrow.fs import HadoopFileSystem
host, port, user = hdfs_connection
fs = HadoopFileSystem(host, port=port, user=user)
return dict(
fs=fs,
pathfn=lambda p: p,
allow_copy_file=False,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_localfs(request, tempdir):
fsspec = pytest.importorskip("fsspec")
fs = fsspec.filesystem('file')
return dict(
fs=PyFileSystem(FSSpecHandler(fs)),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_memoryfs(request, tempdir):
fsspec = pytest.importorskip("fsspec", minversion="0.7.5")
if fsspec.__version__ == "0.8.5":
# see https://issues.apache.org/jira/browse/ARROW-10934
pytest.skip("Bug in fsspec 0.8.5 for in-memory filesystem")
fs = fsspec.filesystem('memory')
return dict(
fs=PyFileSystem(FSSpecHandler(fs)),
pathfn=lambda p: p,
allow_copy_file=True,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def py_fsspec_s3fs(request, s3_connection, s3_server):
s3fs = pytest.importorskip("s3fs")
if (sys.version_info < (3, 7) and
Version(s3fs.__version__) >= Version("0.5")):
pytest.skip("s3fs>=0.5 version is async and requires Python >= 3.7")
host, port, access_key, secret_key = s3_connection
bucket = 'pyarrow-filesystem/'
fs = s3fs.S3FileSystem(
key=access_key,
secret=secret_key,
client_kwargs=dict(endpoint_url='http://{}:{}'.format(host, port))
)
fs = PyFileSystem(FSSpecHandler(fs))
fs.create_dir(bucket)
yield dict(
fs=fs,
pathfn=bucket.__add__,
allow_copy_file=True,
allow_move_dir=False,
allow_append_to_file=True,
)
fs.delete_dir(bucket)
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('localfs'),
id='LocalFileSystem()'
),
pytest.param(
pytest.lazy_fixture('localfs_with_mmap'),
id='LocalFileSystem(use_mmap=True)'
),
pytest.param(
pytest.lazy_fixture('subtree_localfs'),
id='SubTreeFileSystem(LocalFileSystem())'
),
pytest.param(
pytest.lazy_fixture('s3fs'),
id='S3FileSystem'
),
pytest.param(
pytest.lazy_fixture('hdfs'),
id='HadoopFileSystem'
),
pytest.param(
pytest.lazy_fixture('mockfs'),
id='_MockFileSystem()'
),
pytest.param(
pytest.lazy_fixture('py_localfs'),
id='PyFileSystem(ProxyHandler(LocalFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_mockfs'),
id='PyFileSystem(ProxyHandler(_MockFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_localfs'),
id='PyFileSystem(FSSpecHandler(fsspec.LocalFileSystem()))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_memoryfs'),
id='PyFileSystem(FSSpecHandler(fsspec.filesystem("memory")))'
),
pytest.param(
pytest.lazy_fixture('py_fsspec_s3fs'),
id='PyFileSystem(FSSpecHandler(s3fs.S3FileSystem()))'
),
])
def filesystem_config(request):
return request.param
@pytest.fixture
def fs(request, filesystem_config):
return filesystem_config['fs']
@pytest.fixture
def pathfn(request, filesystem_config):
return filesystem_config['pathfn']
@pytest.fixture
def allow_move_dir(request, filesystem_config):
return filesystem_config['allow_move_dir']
@pytest.fixture
def allow_copy_file(request, filesystem_config):
return filesystem_config['allow_copy_file']
@pytest.fixture
def allow_append_to_file(request, filesystem_config):
return filesystem_config['allow_append_to_file']
def check_mtime(file_info):
assert isinstance(file_info.mtime, datetime)
assert isinstance(file_info.mtime_ns, int)
assert file_info.mtime_ns >= 0
assert file_info.mtime_ns == pytest.approx(
file_info.mtime.timestamp() * 1e9)
# It's an aware UTC datetime
tzinfo = file_info.mtime.tzinfo
assert tzinfo is not None
assert tzinfo.utcoffset(None) == timedelta(0)
def check_mtime_absent(file_info):
assert file_info.mtime is None
assert file_info.mtime_ns is None
def check_mtime_or_absent(file_info):
if file_info.mtime is None:
check_mtime_absent(file_info)
else:
check_mtime(file_info)
def skip_fsspec_s3fs(fs):
if fs.type_name == "py::fsspec+s3":
pytest.xfail(reason="Not working with fsspec's s3fs")
def test_file_info_constructor():
dt = datetime.fromtimestamp(1568799826, timezone.utc)
info = FileInfo("foo/bar")
assert info.path == "foo/bar"
assert info.base_name == "bar"
assert info.type == FileType.Unknown
assert info.size is None
check_mtime_absent(info)
info = FileInfo("foo/baz.txt", type=FileType.File, size=123,
mtime=1568799826.5)
assert info.path == "foo/baz.txt"
assert info.base_name == "baz.txt"
assert info.type == FileType.File
assert info.size == 123
assert info.mtime_ns == 1568799826500000000
check_mtime(info)
info = FileInfo("foo", type=FileType.Directory, mtime=dt)
assert info.path == "foo"
assert info.base_name == "foo"
assert info.type == FileType.Directory
assert info.size is None
assert info.mtime == dt
assert info.mtime_ns == 1568799826000000000
check_mtime(info)
def test_cannot_instantiate_base_filesystem():
with pytest.raises(TypeError):
FileSystem()
def test_filesystem_equals():
fs0 = LocalFileSystem()
fs1 = LocalFileSystem()
fs2 = _MockFileSystem()
assert fs0.equals(fs0)
assert fs0.equals(fs1)
with pytest.raises(TypeError):
fs0.equals('string')
assert fs0 == fs0 == fs1
assert fs0 != 4
assert fs2 == fs2
assert fs2 != _MockFileSystem()
assert SubTreeFileSystem('/base', fs0) == SubTreeFileSystem('/base', fs0)
assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/base', fs2)
assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/other', fs0)
def test_subtree_filesystem():
localfs = LocalFileSystem()
subfs = SubTreeFileSystem('/base', localfs)
assert subfs.base_path == '/base/'
assert subfs.base_fs == localfs
subfs = SubTreeFileSystem('/another/base/', LocalFileSystem())
assert subfs.base_path == '/another/base/'
assert subfs.base_fs == localfs
def test_filesystem_pickling(fs):
if fs.type_name.split('::')[-1] == 'mock':
pytest.xfail(reason='MockFileSystem is not serializable')
serialized = pickle.dumps(fs)
restored = pickle.loads(serialized)
assert isinstance(restored, FileSystem)
assert restored.equals(fs)
def test_filesystem_is_functional_after_pickling(fs, pathfn):
if fs.type_name.split('::')[-1] == 'mock':
pytest.xfail(reason='MockFileSystem is not serializable')
skip_fsspec_s3fs(fs)
aaa = pathfn('a/aa/aaa/')
bb = pathfn('a/bb')
c = pathfn('c.txt')
fs.create_dir(aaa)
with fs.open_output_stream(bb):
pass # touch
with fs.open_output_stream(c) as fp:
fp.write(b'test')
restored = pickle.loads(pickle.dumps(fs))
aaa_info, bb_info, c_info = restored.get_file_info([aaa, bb, c])
assert aaa_info.type == FileType.Directory
assert bb_info.type == FileType.File
assert c_info.type == FileType.File
def test_type_name():
fs = LocalFileSystem()
assert fs.type_name == "local"
fs = _MockFileSystem()
assert fs.type_name == "mock"
def test_normalize_path(fs):
# Trivial path names (without separators) should generally be
# already normalized. Just a sanity check.
assert fs.normalize_path("foo") == "foo"
def test_non_path_like_input_raises(fs):
class Path:
pass
invalid_paths = [1, 1.1, Path(), tuple(), {}, [], lambda: 1,
pathlib.Path()]
for path in invalid_paths:
with pytest.raises(TypeError):
fs.create_dir(path)
def test_get_file_info(fs, pathfn):
aaa = pathfn('a/aa/aaa/')
bb = pathfn('a/bb')
c = pathfn('c.txt')
zzz = pathfn('zzz')
fs.create_dir(aaa)
with fs.open_output_stream(bb):
pass # touch
with fs.open_output_stream(c) as fp:
fp.write(b'test')
aaa_info, bb_info, c_info, zzz_info = fs.get_file_info([aaa, bb, c, zzz])
assert aaa_info.path == aaa
assert 'aaa' in repr(aaa_info)
assert aaa_info.extension == ''
if fs.type_name == "py::fsspec+s3":
# s3fs doesn't create empty directories
assert aaa_info.type == FileType.NotFound
else:
assert aaa_info.type == FileType.Directory
assert 'FileType.Directory' in repr(aaa_info)
assert aaa_info.size is None
check_mtime_or_absent(aaa_info)
assert bb_info.path == str(bb)
assert bb_info.base_name == 'bb'
assert bb_info.extension == ''
assert bb_info.type == FileType.File
assert 'FileType.File' in repr(bb_info)
assert bb_info.size == 0
if fs.type_name not in ["py::fsspec+memory", "py::fsspec+s3"]:
check_mtime(bb_info)
assert c_info.path == str(c)
assert c_info.base_name == 'c.txt'
assert c_info.extension == 'txt'
assert c_info.type == FileType.File
assert 'FileType.File' in repr(c_info)
assert c_info.size == 4
if fs.type_name not in ["py::fsspec+memory", "py::fsspec+s3"]:
check_mtime(c_info)
assert zzz_info.path == str(zzz)
assert zzz_info.base_name == 'zzz'
assert zzz_info.extension == ''
assert zzz_info.type == FileType.NotFound
assert zzz_info.size is None
assert zzz_info.mtime is None
assert 'FileType.NotFound' in repr(zzz_info)
check_mtime_absent(zzz_info)
# with single path
aaa_info2 = fs.get_file_info(aaa)
assert aaa_info.path == aaa_info2.path
assert aaa_info.type == aaa_info2.type
def test_get_file_info_with_selector(fs, pathfn):
base_dir = pathfn('selector-dir/')
file_a = pathfn('selector-dir/test_file_a')
file_b = pathfn('selector-dir/test_file_b')
dir_a = pathfn('selector-dir/test_dir_a')
file_c = pathfn('selector-dir/test_dir_a/test_file_c')
dir_b = pathfn('selector-dir/test_dir_b')
try:
fs.create_dir(base_dir)
with fs.open_output_stream(file_a):
pass
with fs.open_output_stream(file_b):
pass
fs.create_dir(dir_a)
with fs.open_output_stream(file_c):
pass
fs.create_dir(dir_b)
# recursive selector
selector = FileSelector(base_dir, allow_not_found=False,
recursive=True)
assert selector.base_dir == base_dir
infos = fs.get_file_info(selector)
if fs.type_name == "py::fsspec+s3":
# s3fs only lists directories if they are not empty, but depending
# on the s3fs/fsspec version combo, it includes the base_dir
# (https://github.com/dask/s3fs/issues/393)
assert (len(infos) == 4) or (len(infos) == 5)
else:
assert len(infos) == 5
for info in infos:
if (info.path.endswith(file_a) or info.path.endswith(file_b) or
info.path.endswith(file_c)):
assert info.type == FileType.File
elif (info.path.rstrip("/").endswith(dir_a) or
info.path.rstrip("/").endswith(dir_b)):
assert info.type == FileType.Directory
elif (fs.type_name == "py::fsspec+s3" and
info.path.rstrip("/").endswith("selector-dir")):
# s3fs can include base dir, see above
assert info.type == FileType.Directory
else:
raise ValueError('unexpected path {}'.format(info.path))
check_mtime_or_absent(info)
# non-recursive selector -> not selecting the nested file_c
selector = FileSelector(base_dir, recursive=False)
infos = fs.get_file_info(selector)
if fs.type_name == "py::fsspec+s3":
# s3fs only lists directories if they are not empty
# + for s3fs 0.5.2 all directories are dropped because of buggy
# side-effect of previous find() call
# (https://github.com/dask/s3fs/issues/410)
assert (len(infos) == 3) or (len(infos) == 2)
else:
assert len(infos) == 4
finally:
fs.delete_dir(base_dir)
def test_create_dir(fs, pathfn):
# s3fs fails deleting dir fails if it is empty
# (https://github.com/dask/s3fs/issues/317)
skip_fsspec_s3fs(fs)
d = pathfn('test-directory/')
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
fs.create_dir(d)
fs.delete_dir(d)
d = pathfn('deeply/nested/test-directory/')
fs.create_dir(d, recursive=True)
fs.delete_dir(d)
def test_delete_dir(fs, pathfn):
skip_fsspec_s3fs(fs)
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
fs.delete_dir(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(nd)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def test_delete_dir_contents(fs, pathfn):
skip_fsspec_s3fs(fs)
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
fs.delete_dir_contents(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(nd)
fs.delete_dir(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def _check_root_dir_contents(config):
fs = config['fs']
pathfn = config['pathfn']
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("")
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("/")
with pytest.raises(pa.ArrowInvalid):
fs.delete_dir_contents("//")
fs.delete_dir_contents("", accept_root_dir=True)
fs.delete_dir_contents("/", accept_root_dir=True)
fs.delete_dir_contents("//", accept_root_dir=True)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def test_delete_root_dir_contents(mockfs, py_mockfs):
_check_root_dir_contents(mockfs)
_check_root_dir_contents(py_mockfs)
def test_copy_file(fs, pathfn, allow_copy_file):
s = pathfn('test-copy-source-file')
t = pathfn('test-copy-target-file')
with fs.open_output_stream(s):
pass
if allow_copy_file:
fs.copy_file(s, t)
fs.delete_file(s)
fs.delete_file(t)
else:
with pytest.raises(pa.ArrowNotImplementedError):
fs.copy_file(s, t)
def test_move_directory(fs, pathfn, allow_move_dir):
# move directory (doesn't work with S3)
s = pathfn('source-dir/')
t = pathfn('target-dir/')
fs.create_dir(s)
if allow_move_dir:
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(s)
fs.delete_dir(t)
else:
with pytest.raises(pa.ArrowIOError):
fs.move(s, t)
def test_move_file(fs, pathfn):
# s3fs moving a file with recursive=True on latest 0.5 version
# (https://github.com/dask/s3fs/issues/394)
skip_fsspec_s3fs(fs)
s = pathfn('test-move-source-file')
t = pathfn('test-move-target-file')
with fs.open_output_stream(s):
pass
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(s)
fs.delete_file(t)
def test_delete_file(fs, pathfn):
p = pathfn('test-delete-target-file')
with fs.open_output_stream(p):
pass
fs.delete_file(p)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(p)
d = pathfn('test-delete-nested')
fs.create_dir(d)
f = pathfn('test-delete-nested/target-file')
with fs.open_output_stream(f) as s:
s.write(b'data')
fs.delete_dir(d)
def identity(v):
return v
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip.compress),
('gzip', 256, gzip.compress),
]
)
def test_open_input_stream(fs, pathfn, compression, buffer_size, compressor):
p = pathfn('open-input-stream')
data = b'some data for reading\n' * 512
with fs.open_output_stream(p) as s:
s.write(compressor(data))
with fs.open_input_stream(p, compression, buffer_size) as s:
result = s.read()
assert result == data
def test_open_input_file(fs, pathfn):
p = pathfn('open-input-file')
data = b'some data' * 1024
with fs.open_output_stream(p) as s:
s.write(data)
read_from = len(b'some data') * 512
with fs.open_input_file(p) as f:
f.seek(read_from)
result = f.read()
assert result == data[read_from:]
@pytest.mark.parametrize(
('compression', 'buffer_size', 'decompressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip.decompress),
('gzip', 256, gzip.decompress),
]
)
def test_open_output_stream(fs, pathfn, compression, buffer_size,
decompressor):
p = pathfn('open-output-stream')
data = b'some data for writing' * 1024
with fs.open_output_stream(p, compression, buffer_size) as f:
f.write(data)
with fs.open_input_stream(p, compression, buffer_size) as f:
assert f.read(len(data)) == data
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor', 'decompressor'),
[
(None, None, identity, identity),
(None, 64, identity, identity),
('gzip', None, gzip.compress, gzip.decompress),
('gzip', 256, gzip.compress, gzip.decompress),
]
)
def test_open_append_stream(fs, pathfn, compression, buffer_size, compressor,
decompressor, allow_append_to_file):
p = pathfn('open-append-stream')
initial = compressor(b'already existing')
with fs.open_output_stream(p) as s:
s.write(initial)
if allow_append_to_file:
with fs.open_append_stream(p, compression=compression,
buffer_size=buffer_size) as f:
f.write(b'\nnewly added')
with fs.open_input_stream(p) as f:
result = f.read()
result = decompressor(result)
assert result == b'already existing\nnewly added'
else:
with pytest.raises(pa.ArrowNotImplementedError):
fs.open_append_stream(p, compression=compression,
buffer_size=buffer_size)
def test_localfs_options():
# LocalFileSystem instantiation
LocalFileSystem(use_mmap=False)
with pytest.raises(TypeError):
LocalFileSystem(xxx=False)
def test_localfs_errors(localfs):
# Local filesystem errors should raise the right Python exceptions
# (e.g. FileNotFoundError)
fs = localfs['fs']
with assert_file_not_found():
fs.open_input_stream('/non/existent/file')
with assert_file_not_found():
fs.open_output_stream('/non/existent/file')
with assert_file_not_found():
fs.create_dir('/non/existent/dir', recursive=False)
with assert_file_not_found():
fs.delete_dir('/non/existent/dir')
with assert_file_not_found():
fs.delete_file('/non/existent/dir')
with assert_file_not_found():
fs.move('/non/existent', '/xxx')
with assert_file_not_found():
fs.copy_file('/non/existent', '/xxx')
def test_localfs_file_info(localfs):
fs = localfs['fs']
file_path = pathlib.Path(__file__)
dir_path = file_path.parent
[file_info, dir_info] = fs.get_file_info([file_path.as_posix(),
dir_path.as_posix()])
assert file_info.size == file_path.stat().st_size
assert file_info.mtime_ns == file_path.stat().st_mtime_ns
check_mtime(file_info)
assert dir_info.mtime_ns == dir_path.stat().st_mtime_ns
check_mtime(dir_info)
def test_mockfs_mtime_roundtrip(mockfs):
dt = datetime.fromtimestamp(1568799826, timezone.utc)
fs = _MockFileSystem(dt)
with fs.open_output_stream('foo'):
pass
[info] = fs.get_file_info(['foo'])
assert info.mtime == dt
@pytest.mark.s3
def test_s3_options():
from pyarrow.fs import S3FileSystem
fs = S3FileSystem(access_key='access', secret_key='secret',
session_token='token', region='us-east-2',
scheme='https', endpoint_override='localhost:8999')
assert isinstance(fs, S3FileSystem)
assert fs.region == 'us-east-2'
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(role_arn='role', session_name='session',
external_id='id', load_frequency=100)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
with pytest.raises(ValueError):
S3FileSystem(access_key='access')
with pytest.raises(ValueError):
S3FileSystem(secret_key='secret')
with pytest.raises(ValueError):
S3FileSystem(access_key='access', session_token='token')
with pytest.raises(ValueError):
S3FileSystem(secret_key='secret', session_token='token')
with pytest.raises(ValueError):
S3FileSystem(
access_key='access', secret_key='secret', role_arn='arn'
)
@pytest.mark.s3
def test_s3_proxy_options(monkeypatch):
from pyarrow.fs import S3FileSystem
# The following two are equivalent:
proxy_opts_1_dict = {'scheme': 'http', 'host': 'localhost', 'port': 8999}
proxy_opts_1_str = 'http://localhost:8999'
# The following two are equivalent:
proxy_opts_2_dict = {'scheme': 'https', 'host': 'localhost', 'port': 8080}
proxy_opts_2_str = 'https://localhost:8080'
# Check dict case for 'proxy_options'
fs = S3FileSystem(proxy_options=proxy_opts_1_dict)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
# Check str case for 'proxy_options'
fs = S3FileSystem(proxy_options=proxy_opts_1_str)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
fs = S3FileSystem(proxy_options=proxy_opts_2_str)
assert isinstance(fs, S3FileSystem)
assert pickle.loads(pickle.dumps(fs)) == fs
# Check that two FSs using the same proxy_options dict are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_dict)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using the same proxy_options str are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using equivalent proxy_options
# (one dict, one str) are equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_1_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 == fs2
assert pickle.loads(pickle.dumps(fs1)) == fs2
assert pickle.loads(pickle.dumps(fs2)) == fs1
# Check that two FSs using nonequivalent proxy_options are not equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem(proxy_options=proxy_opts_2_str)
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
# Check that two FSs (one using proxy_options and the other not)
# are not equal
fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_1_str)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
fs1 = S3FileSystem(proxy_options=proxy_opts_2_str)
fs2 = S3FileSystem()
assert fs1 != fs2
assert pickle.loads(pickle.dumps(fs1)) != fs2
assert pickle.loads(pickle.dumps(fs2)) != fs1
# Only dict and str are supported
with pytest.raises(TypeError):
S3FileSystem(proxy_options=('http', 'localhost', 9090))
# Missing scheme
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'host': 'localhost', 'port': 9090})
# Missing host
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'scheme': 'https', 'port': 9090})
# Missing port
with pytest.raises(KeyError):
S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost'})
# Invalid proxy URI (invalid scheme htttps)
with pytest.raises(pa.ArrowInvalid):
S3FileSystem(proxy_options='htttps://localhost:9000')
# Invalid proxy_options dict (invalid scheme htttps)
with pytest.raises(pa.ArrowInvalid):
S3FileSystem(proxy_options={'scheme': 'htttp', 'host': 'localhost',
'port': 8999})
@pytest.mark.hdfs
def test_hdfs_options(hdfs_connection):
from pyarrow.fs import HadoopFileSystem
if not pa.have_libhdfs():
pytest.skip('Cannot locate libhdfs')
host, port, user = hdfs_connection
replication = 2
buffer_size = 64*1024
default_block_size = 128*1024**2
uri = ('hdfs://{}:{}/?user={}&replication={}&buffer_size={}'
'&default_block_size={}')
hdfs1 = HadoopFileSystem(host, port, user='libhdfs',
replication=replication, buffer_size=buffer_size,
default_block_size=default_block_size)
hdfs2 = HadoopFileSystem.from_uri(uri.format(
host, port, 'libhdfs', replication, buffer_size, default_block_size
))
hdfs3 = HadoopFileSystem.from_uri(uri.format(
host, port, 'me', replication, buffer_size, default_block_size
))
hdfs4 = HadoopFileSystem.from_uri(uri.format(
host, port, 'me', replication + 1, buffer_size, default_block_size
))
hdfs5 = HadoopFileSystem(host, port)
hdfs6 = HadoopFileSystem.from_uri('hdfs://{}:{}'.format(host, port))
hdfs7 = HadoopFileSystem(host, port, user='localuser')
hdfs8 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path")
hdfs9 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket=pathlib.Path("cache_path"))
hdfs10 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path2")
hdfs11 = HadoopFileSystem(host, port, user='localuser',
kerb_ticket="cache_path",
extra_conf={'hdfs_token': 'abcd'})
assert hdfs1 == hdfs2
assert hdfs5 == hdfs6
assert hdfs6 != hdfs7
assert hdfs2 != hdfs3
assert hdfs3 != hdfs4
assert hdfs7 != hdfs5
assert hdfs2 != hdfs3
assert hdfs3 != hdfs4
assert hdfs7 != hdfs8
assert hdfs8 == hdfs9
assert hdfs10 != hdfs9
assert hdfs11 != hdfs8
with pytest.raises(TypeError):
HadoopFileSystem()
with pytest.raises(TypeError):
HadoopFileSystem.from_uri(3)
for fs in [hdfs1, hdfs2, hdfs3, hdfs4, hdfs5, hdfs6, hdfs7, hdfs8,
hdfs9, hdfs10, hdfs11]:
assert pickle.loads(pickle.dumps(fs)) == fs
host, port, user = hdfs_connection
hdfs = HadoopFileSystem(host, port, user=user)
assert hdfs.get_file_info(FileSelector('/'))
hdfs = HadoopFileSystem.from_uri(
"hdfs://{}:{}/?user={}".format(host, port, user)
)
assert hdfs.get_file_info(FileSelector('/'))
@pytest.mark.parametrize(('uri', 'expected_klass', 'expected_path'), [
# leading slashes are removed intentionally, because MockFileSystem doesn't
# have a distinction between relative and absolute paths
('mock:', _MockFileSystem, ''),
('mock:foo/bar', _MockFileSystem, 'foo/bar'),
('mock:/foo/bar', _MockFileSystem, 'foo/bar'),
('mock:///foo/bar', _MockFileSystem, 'foo/bar'),
('file:/', LocalFileSystem, '/'),
('file:///', LocalFileSystem, '/'),
('file:/foo/bar', LocalFileSystem, '/foo/bar'),
('file:///foo/bar', LocalFileSystem, '/foo/bar'),
('/', LocalFileSystem, '/'),
('/foo/bar', LocalFileSystem, '/foo/bar'),
])
def test_filesystem_from_uri(uri, expected_klass, expected_path):
fs, path = FileSystem.from_uri(uri)
assert isinstance(fs, expected_klass)
assert path == expected_path
@pytest.mark.parametrize(
'path',
['', '/', 'foo/bar', '/foo/bar', __file__]
)
def test_filesystem_from_path_object(path):
p = pathlib.Path(path)
fs, path = FileSystem.from_uri(p)
assert isinstance(fs, LocalFileSystem)
assert path == p.resolve().absolute().as_posix()
@pytest.mark.s3
def test_filesystem_from_uri_s3(s3_connection, s3_server):
from pyarrow.fs import S3FileSystem
host, port, access_key, secret_key = s3_connection
uri = "s3://{}:{}@mybucket/foo/bar?scheme=http&endpoint_override={}:{}" \
.format(access_key, secret_key, host, port)
fs, path = FileSystem.from_uri(uri)
assert isinstance(fs, S3FileSystem)
assert path == "mybucket/foo/bar"
fs.create_dir(path)
[info] = fs.get_file_info([path])
assert info.path == path
assert info.type == FileType.Directory
def test_py_filesystem():
handler = DummyHandler()
fs = PyFileSystem(handler)
assert isinstance(fs, PyFileSystem)
assert fs.type_name == "py::dummy"
assert fs.handler is handler
with pytest.raises(TypeError):
PyFileSystem(None)
def test_py_filesystem_equality():
handler1 = DummyHandler(1)
handler2 = DummyHandler(2)
handler3 = DummyHandler(2)
fs1 = PyFileSystem(handler1)
fs2 = PyFileSystem(handler1)
fs3 = PyFileSystem(handler2)
fs4 = PyFileSystem(handler3)
assert fs2 is not fs1
assert fs3 is not fs2
assert fs4 is not fs3
assert fs2 == fs1 # Same handler
assert fs3 != fs2 # Unequal handlers
assert fs4 == fs3 # Equal handlers
assert fs1 != LocalFileSystem()
assert fs1 != object()
def test_py_filesystem_pickling():
handler = DummyHandler()
fs = PyFileSystem(handler)
serialized = pickle.dumps(fs)
restored = pickle.loads(serialized)
assert isinstance(restored, FileSystem)
assert restored == fs
assert restored.handler == handler
assert restored.type_name == "py::dummy"
def test_py_filesystem_lifetime():
handler = DummyHandler()
fs = PyFileSystem(handler)
assert isinstance(fs, PyFileSystem)
wr = weakref.ref(handler)
handler = None
assert wr() is not None
fs = None
assert wr() is None
# Taking the .handler attribute doesn't wreck reference counts
handler = DummyHandler()
fs = PyFileSystem(handler)
wr = weakref.ref(handler)
handler = None
assert wr() is fs.handler
assert wr() is not None
fs = None
assert wr() is None
def test_py_filesystem_get_file_info():
handler = DummyHandler()
fs = PyFileSystem(handler)
[info] = fs.get_file_info(['some/dir'])
assert info.path == 'some/dir'
assert info.type == FileType.Directory
[info] = fs.get_file_info(['some/file'])
assert info.path == 'some/file'
assert info.type == FileType.File
[info] = fs.get_file_info(['notfound'])
assert info.path == 'notfound'
assert info.type == FileType.NotFound
with pytest.raises(TypeError):
fs.get_file_info(['badtype'])
with pytest.raises(IOError):
fs.get_file_info(['xxx'])
def test_py_filesystem_get_file_info_selector():
handler = DummyHandler()
fs = PyFileSystem(handler)
selector = FileSelector(base_dir="somedir")
infos = fs.get_file_info(selector)
assert len(infos) == 2
assert infos[0].path == "somedir/file1"
assert infos[0].type == FileType.File
assert infos[0].size == 123
assert infos[1].path == "somedir/subdir1"
assert infos[1].type == FileType.Directory
assert infos[1].size is None
selector = FileSelector(base_dir="somedir", recursive=True)
infos = fs.get_file_info(selector)
assert len(infos) == 3
assert infos[0].path == "somedir/file1"
assert infos[1].path == "somedir/subdir1"
assert infos[2].path == "somedir/subdir1/file2"
selector = FileSelector(base_dir="notfound")
with pytest.raises(FileNotFoundError):
fs.get_file_info(selector)
selector = FileSelector(base_dir="notfound", allow_not_found=True)
assert fs.get_file_info(selector) == []
def test_py_filesystem_ops():
handler = DummyHandler()
fs = PyFileSystem(handler)
fs.create_dir("recursive", recursive=True)
fs.create_dir("non-recursive", recursive=False)
with pytest.raises(IOError):
fs.create_dir("foobar")
fs.delete_dir("delete_dir")
fs.delete_dir_contents("delete_dir_contents")
for path in ("", "/", "//"):
with pytest.raises(ValueError):
fs.delete_dir_contents(path)
fs.delete_dir_contents(path, accept_root_dir=True)
fs.delete_file("delete_file")
fs.move("move_from", "move_to")
fs.copy_file("copy_file_from", "copy_file_to")
def test_py_open_input_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_input_stream("somefile") as f:
assert f.read() == b"somefile:input_stream"
with pytest.raises(FileNotFoundError):
fs.open_input_stream("notfound")
def test_py_open_input_file():
fs = PyFileSystem(DummyHandler())
with fs.open_input_file("somefile") as f:
assert f.read() == b"somefile:input_file"
with pytest.raises(FileNotFoundError):
fs.open_input_file("notfound")
def test_py_open_output_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_output_stream("somefile") as f:
f.write(b"data")
def test_py_open_append_stream():
fs = PyFileSystem(DummyHandler())
with fs.open_append_stream("somefile") as f:
f.write(b"data")
@pytest.mark.s3
def test_s3_real_aws():
# Exercise connection code with an AWS-backed S3 bucket.
# This is a minimal integration check for ARROW-9261 and similar issues.
from pyarrow.fs import S3FileSystem
default_region = (os.environ.get('PYARROW_TEST_S3_REGION') or
'us-east-1')
fs = S3FileSystem(anonymous=True)
assert fs.region == default_region
fs = S3FileSystem(anonymous=True, region='us-east-2')
entries = fs.get_file_info(FileSelector('ursa-labs-taxi-data'))
assert len(entries) > 0
@pytest.mark.s3
def test_s3_real_aws_region_selection():
# Taken from a registry of open S3-hosted datasets
# at https://github.com/awslabs/open-data-registry
fs, path = FileSystem.from_uri('s3://mf-nwp-models/README.txt')
assert fs.region == 'eu-west-1'
with fs.open_input_stream(path) as f:
assert b"Meteo-France Atmospheric models on AWS" in f.read(50)
# Passing an explicit region disables auto-selection
fs, path = FileSystem.from_uri(
's3://mf-nwp-models/README.txt?region=us-east-2')
assert fs.region == 'us-east-2'
# Reading from the wrong region may still work for public buckets...
# Non-existent bucket (hopefully, otherwise need to fix this test)
with pytest.raises(IOError, match="Bucket '.*' not found"):
FileSystem.from_uri('s3://x-arrow-non-existent-bucket')
fs, path = FileSystem.from_uri(
's3://x-arrow-non-existent-bucket?region=us-east-3')
assert fs.region == 'us-east-3'
| 30.514154 | 79 | 0.655994 |
acdff299c58ca034df7311ce9671d9c1d8cd1f07 | 850 | py | Python | tests/ctrls/ws_controllers.py | jo19in1/python-simple-http-server | c6f765d714f792f312b32788d81d75a854a229da | [
"MIT"
] | null | null | null | tests/ctrls/ws_controllers.py | jo19in1/python-simple-http-server | c6f765d714f792f312b32788d81d75a854a229da | [
"MIT"
] | null | null | null | tests/ctrls/ws_controllers.py | jo19in1/python-simple-http-server | c6f765d714f792f312b32788d81d75a854a229da | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from simple_http_server import WebsocketHandler, WebsocketRequest, WebsocketSession, websocket_handler
import simple_http_server.logger as logger
_logger = logger.get_logger("ws_test")
@websocket_handler(endpoint="/ws/{path_val}")
class WSHandler(WebsocketHandler):
def on_handshake(self, request: WebsocketRequest):
return 0, {}
async def on_open(self, session: WebsocketSession):
_logger.info(f">>{session.id}<< open! {session.request.path_values}")
def on_text_message(self, session: WebsocketSession, message: str):
_logger.info(f">>{session.id}<< on text message: {message}")
session.send(f"{session.request.path_values['path_val']}-{message}")
def on_close(self, session: WebsocketSession, reason: str):
_logger.info(f">>{session.id}<< close::{reason}")
| 32.692308 | 102 | 0.710588 |
acdff3737b939d5197ba12240c5350f0cab95e93 | 7,378 | py | Python | scripts/symbolic_ICESat2_files.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 50 | 2019-07-22T14:13:28.000Z | 2022-03-16T19:18:07.000Z | scripts/symbolic_ICESat2_files.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 2 | 2020-08-16T06:52:24.000Z | 2021-07-12T23:05:07.000Z | scripts/symbolic_ICESat2_files.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 19 | 2019-07-01T03:01:01.000Z | 2022-02-25T00:29:44.000Z | #!/usr/bin/env python
u"""
symbolic_ICESat2_files.py
Written by Tyler Sutterley (10/2021)
Creates symbolic links for ICESat-2 HDF5 files organized by date
CALLING SEQUENCE:
python symbolic_ICESat2_files.py --product ATL06 --release 003 \
--granule 10 11 12 --cycle 1 2 --directory <path_to_directory>
--scf_outgoing <path_to_outgoing> --verbose --mode 0o775
COMMAND LINE OPTIONS:
-h, --help: list the command line options
-D X, --directory X: local working directory for creating symbolic links
--product X: ICESat-2 data product to create symbolic links
--release X: ICESat-2 data release to create symbolic links
--version X: ICESat-2 data version to create symbolic links
--granule X: ICESat-2 granule regions to create symbolic links
--cycle X: ICESat-2 cycle to create symbolic links
--track X: ICESat-2 tracks to create symbolic links
--scf_incoming X: directory on the SCF where the rscf sends PANS
--scf_outgoing X: directory on the SCF where the data resides
-V, --verbose: output information about each symbolic link
-M X, --mode X: permission mode of directories
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
Updated 11/2020: add exception for FileExistsError to skip files
Updated 10/2020: using argparse to set parameters
Updated 05/2020: adjust regular expression to run ATL07 sea ice products
Written 07/2019
"""
from __future__ import print_function
import sys
import os
import re
import logging
import argparse
import numpy as np
#-- Main program that calls symbolic_ICESat2_files()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Creates symbolic links for ICESat-2 HDF5 files from the
local scf directory to a separate directory organized by date
"""
)
#-- ICESat-2 Products
PRODUCTS = {}
PRODUCTS['ATL03'] = 'Global Geolocated Photon Data'
PRODUCTS['ATL04'] = 'Normalized Relative Backscatter'
PRODUCTS['ATL06'] = 'Land Ice Height'
PRODUCTS['ATL07'] = 'Sea Ice Height'
PRODUCTS['ATL08'] = 'Land and Vegetation Height'
PRODUCTS['ATL09'] = 'Atmospheric Layer Characteristics'
PRODUCTS['ATL10'] = 'Sea Ice Freeboard'
PRODUCTS['ATL12'] = 'Ocean Surface Height'
PRODUCTS['ATL13'] = 'Inland Water Surface Height'
#-- command line parameters
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory for symbolic link')
#-- ICESat-2 parameters
#-- ICESat-2 data product
parser.add_argument('--product','-p',
metavar='PRODUCTS', type=str,
choices=PRODUCTS.keys(), default='ATL06',
help='ICESat-2 data product to create symbolic links')
#-- ICESat-2 data release
parser.add_argument('--release','-r',
type=str, default='004',
help='ICESat-2 data release to create symbolic links')
#-- ICESat-2 data version
parser.add_argument('--version','-v',
type=int, nargs='+', default=range(1,10),
help='ICESat-2 data versions to create symbolic links')
#-- ICESat-2 granule region
parser.add_argument('--granule','-g',
metavar='REGION', type=int, nargs='+',
choices=range(1,15), default=range(1,15),
help='ICESat-2 granule regions to create symbolic links')
#-- ICESat-2 orbital cycle
parser.add_argument('--cycle','-c',
type=int, nargs='+',
default=range(1,10),
help='ICESat-2 orbital cycles to create symbolic links')
#-- ICESat-2 reference ground tracks
parser.add_argument('--track','-t',
metavar='RGT', type=int, nargs='+',
choices=range(1,1388), default=range(1,1388),
help='ICESat-2 Reference Ground Tracks (RGTs) to create symbolic links')
#-- ICESat-2 Science Computing Facility (SCF) parameters
parser.add_argument('--scf_incoming',
type=str,
help='Directory on the SCF where the rscf sends PANS')
parser.add_argument('--scf_outgoing',
type=str,
help='Directory on the SCF where the data resides')
#-- verbose will output information about each symbolic link
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Output information about each symbolic link')
#-- permissions mode of the local directories (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='permissions mode of output directories')
args,_ = parser.parse_known_args()
#-- create logger
loglevel = logging.INFO if args.verbose else logging.CRITICAL
logging.basicConfig(level=loglevel)
#-- run program
symbolic_ICESat2_files(args.directory, args.scf_incoming, args.scf_outgoing,
args.product, args.release, args.version, args.granule, args.cycle,
args.track, MODE=args.mode)
#-- PURPOSE: copy ICESat-2 files to data directory with data subdirectories
def symbolic_ICESat2_files(base_dir, scf_incoming, scf_outgoing, PRODUCT,
RELEASE, VERSIONS, GRANULES, CYCLES, TRACKS, MODE=0o775):
#-- find ICESat-2 HDF5 files in the subdirectory for product and release
TRACKS = np.arange(1,1388) if not np.any(TRACKS) else TRACKS
CYCLES = np.arange(1,10) if not np.any(CYCLES) else CYCLES
GRANULES = np.arange(1,15) if not np.any(GRANULES) else GRANULES
VERSIONS = np.arange(1,10) if not np.any(VERSIONS) else VERSIONS
regex_track = '|'.join(['{0:04d}'.format(T) for T in TRACKS])
regex_cycle = '|'.join(['{0:02d}'.format(C) for C in CYCLES])
regex_granule = '|'.join(['{0:02d}'.format(G) for G in GRANULES])
regex_version = '|'.join(['{0:02d}'.format(V) for V in VERSIONS])
#-- compile regular expression operator for extracting data from files
args = (PRODUCT,regex_track,regex_cycle,regex_granule,RELEASE,regex_version)
regex_pattern = (r'(processed_)?({0})(-\d{{2}})?_(\d{{4}})(\d{{2}})(\d{{2}})'
r'(\d{{2}})(\d{{2}})(\d{{2}})_({1})({2})({3})_({4})_({5})(.*?).h5$')
rx = re.compile(regex_pattern.format(*args),re.VERBOSE)
#-- find files within scf_outgoing
file_transfers = [f for f in os.listdir(scf_outgoing) if rx.match(f)]
for f in sorted(file_transfers):
#-- extract parameters from file
SUB,PRD,HEM,YY,MM,DD,HH,MN,SS,TRK,CYC,GRN,RL,VRS,AUX=rx.findall(f).pop()
#-- put symlinks in directories similar to NSIDC
#-- check if data directory exists and recursively create if not
local_dir = os.path.join(base_dir,'{0}.{1}.{2}'.format(YY,MM,DD))
os.makedirs(local_dir,MODE) if not os.path.exists(local_dir) else None
#-- attempt to create the symbolic link else continue
try:
#-- create symbolic link of file from scf_outgoing to local
os.symlink(os.path.join(scf_outgoing,f), os.path.join(local_dir,f))
except FileExistsError:
continue
else:
#-- print original and symbolic link of file
args = (os.path.join(scf_outgoing,f),os.path.join(local_dir,f))
logging.info('{0} -->\n\t{1}'.format(*args))
#-- run main program
if __name__ == '__main__':
main()
| 45.54321 | 81 | 0.66888 |
acdff3829dac8b3b524fd5fef567b36ce7365038 | 11,535 | py | Python | pysnmp-with-texts/IB-DHCPV6ONE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/IB-DHCPV6ONE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/IB-DHCPV6ONE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module IB-DHCPV6ONE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IB-DHCPV6ONE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:50:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
IbString, ibDHCPOne, IbIpv6Addr = mibBuilder.importSymbols("IB-SMI-MIB", "IbString", "ibDHCPOne", "IbIpv6Addr")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Gauge32, Counter32, Integer32, ModuleIdentity, Unsigned32, enterprises, TimeTicks, ObjectIdentity, MibIdentifier, NotificationType, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Gauge32", "Counter32", "Integer32", "ModuleIdentity", "Unsigned32", "enterprises", "TimeTicks", "ObjectIdentity", "MibIdentifier", "NotificationType", "Counter64", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ibDhcpv6Module = ModuleIdentity((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2))
ibDhcpv6Module.setRevisions(('2010-12-29 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibDhcpv6Module.setRevisionsDescriptions(('Added DHCPv6 module.',))
if mibBuilder.loadTexts: ibDhcpv6Module.setLastUpdated('201012290000Z')
if mibBuilder.loadTexts: ibDhcpv6Module.setOrganization('Infoblox')
if mibBuilder.loadTexts: ibDhcpv6Module.setContactInfo('See IB-SMI-MIB for information.')
if mibBuilder.loadTexts: ibDhcpv6Module.setDescription('This module defines the Infoblox DHCPv6 One MIB.')
ibDHCPv6SubnetTable = MibTable((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 1), )
if mibBuilder.loadTexts: ibDHCPv6SubnetTable.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6SubnetTable.setDescription('A table of DHCPv6 Subnet statistics.')
ibDHCPv6SubnetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 1, 1), ).setIndexNames((0, "IB-DHCPV6ONE-MIB", "ibDHCPv6SubnetNetworkAddress"))
if mibBuilder.loadTexts: ibDHCPv6SubnetEntry.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6SubnetEntry.setDescription('A conceptual row of the ibDHCPv6SubnetEntry containing info about a particular network using DHCPv6.')
ibDHCPv6SubnetNetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 1, 1, 1), IbIpv6Addr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6SubnetNetworkAddress.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6SubnetNetworkAddress.setDescription('DHCPv6 Subnet in Ipv6Address format. A subnetwork may have many ranges for lease.')
ibDHCPv6SubnetNetworkMask = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6SubnetNetworkMask.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6SubnetNetworkMask.setDescription('DHCPv6 Subnet CIDR.')
ibDHCPv6Statistics = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3))
ibDhcpv6TotalNoOfSolicits = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfSolicits.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfSolicits.setDescription('This variable indicates the number of solicit messages received')
ibDhcpv6TotalNoOfRequests = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRequests.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRequests.setDescription('This variable indicates the number of request messages received')
ibDhcpv6TotalNoOfReleases = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfReleases.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfReleases.setDescription('This variable indicates the number of release messages received')
ibDhcpv6TotalNoOfAdvertises = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfAdvertises.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfAdvertises.setDescription('This variable indicates the number of advertise messages sent')
ibDhcpv6TotalNoOfReplies = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfReplies.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfReplies.setDescription('This variable indicates the number of reply messages sent')
ibDhcpv6TotalNoOfRenews = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRenews.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRenews.setDescription('This variable indicates the number of renew messages sent')
ibDhcpv6TotalNoOfRebinds = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRebinds.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfRebinds.setDescription('This variable indicates the number of rebind messages received')
ibDhcpv6TotalNoOfDeclines = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfDeclines.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfDeclines.setDescription('This variable indicates the number of decline messages received')
ibDhcpv6TotalNoOfInformationRequests = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfInformationRequests.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfInformationRequests.setDescription('This variable indicates the number of Information-Request messages received')
ibDhcpv6TotalNoOfOthers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 3, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfOthers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6TotalNoOfOthers.setDescription('This variable indicates the number of other messages received')
ibDhcpv6DeferredQueueSize = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpv6DeferredQueueSize.setStatus('current')
if mibBuilder.loadTexts: ibDhcpv6DeferredQueueSize.setDescription('The size of deferred dynamic DNS update queue for DHCPv6')
ibDHCPv6DDNSStats = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5))
ibDHCPv6DDNSAvgLatency5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency5.setDescription('Average Latencies (in microseconds) for DHCPDv6 dynamic DNS updates during the last 5 minutes')
ibDHCPv6DDNSAvgLatency15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency15.setDescription('Average Latencies (in microseconds) for DHCPDv6 dynamic DNS updates during the last 15 minutes')
ibDHCPv6DDNSAvgLatency60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency60.setDescription('Average Latencies (in microseconds) for DHCPDv6 dynamic DNS updates during the last 60 minutes')
ibDHCPv6DDNSAvgLatency1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSAvgLatency1440.setDescription('Average Latencies (in microseconds) for DHCPDv6 dynamic DNS updates during the last 1 day')
ibDHCPv6DDNSTimeoutCount5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount5.setDescription('The number of timeout DHCPDv6 dynamic DDNS updates during the last 5 minutes')
ibDHCPv6DDNSTimeoutCount15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount15.setDescription('The number of timeout DHCPDv6 dynamic DDNS updates during the last 15 minutes')
ibDHCPv6DDNSTimeoutCount60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount60.setDescription('The number of timeout DHCPDv6 dynamic DDNS updates during the last 60 minutes')
ibDHCPv6DDNSTimeoutCount1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 2, 5, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPv6DDNSTimeoutCount1440.setDescription('The number of timeout DHCPDv6 dynamic DDNS updates during the last 1 day')
mibBuilder.exportSymbols("IB-DHCPV6ONE-MIB", ibDHCPv6Statistics=ibDHCPv6Statistics, ibDHCPv6DDNSAvgLatency15=ibDHCPv6DDNSAvgLatency15, ibDHCPv6SubnetTable=ibDHCPv6SubnetTable, ibDhcpv6Module=ibDhcpv6Module, ibDHCPv6DDNSTimeoutCount1440=ibDHCPv6DDNSTimeoutCount1440, ibDHCPv6SubnetEntry=ibDHCPv6SubnetEntry, ibDHCPv6DDNSTimeoutCount15=ibDHCPv6DDNSTimeoutCount15, ibDhcpv6TotalNoOfReleases=ibDhcpv6TotalNoOfReleases, ibDhcpv6TotalNoOfRenews=ibDhcpv6TotalNoOfRenews, ibDHCPv6DDNSTimeoutCount60=ibDHCPv6DDNSTimeoutCount60, ibDHCPv6DDNSAvgLatency5=ibDHCPv6DDNSAvgLatency5, PYSNMP_MODULE_ID=ibDhcpv6Module, ibDhcpv6TotalNoOfOthers=ibDhcpv6TotalNoOfOthers, ibDHCPv6DDNSAvgLatency60=ibDHCPv6DDNSAvgLatency60, ibDhcpv6TotalNoOfSolicits=ibDhcpv6TotalNoOfSolicits, ibDhcpv6DeferredQueueSize=ibDhcpv6DeferredQueueSize, ibDHCPv6SubnetNetworkAddress=ibDHCPv6SubnetNetworkAddress, ibDhcpv6TotalNoOfAdvertises=ibDhcpv6TotalNoOfAdvertises, ibDHCPv6DDNSAvgLatency1440=ibDHCPv6DDNSAvgLatency1440, ibDHCPv6DDNSStats=ibDHCPv6DDNSStats, ibDHCPv6DDNSTimeoutCount5=ibDHCPv6DDNSTimeoutCount5, ibDhcpv6TotalNoOfRebinds=ibDhcpv6TotalNoOfRebinds, ibDhcpv6TotalNoOfReplies=ibDhcpv6TotalNoOfReplies, ibDhcpv6TotalNoOfRequests=ibDhcpv6TotalNoOfRequests, ibDHCPv6SubnetNetworkMask=ibDHCPv6SubnetNetworkMask, ibDhcpv6TotalNoOfDeclines=ibDhcpv6TotalNoOfDeclines, ibDhcpv6TotalNoOfInformationRequests=ibDhcpv6TotalNoOfInformationRequests)
| 120.15625 | 1,403 | 0.797573 |
acdff3f47cd8f85929c330e45f44cbbbdebfd1d2 | 10,293 | py | Python | runtime/hetdesrun/backend/service/base_item_router.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | runtime/hetdesrun/backend/service/base_item_router.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | runtime/hetdesrun/backend/service/base_item_router.py | JulianGrote1904/hetida-designer | 05350810eb3e0548c9d8a2a5a6afbf455635b5fd | [
"MIT"
] | null | null | null | from typing import List
import logging
from uuid import UUID
from fastapi import APIRouter, Path, status, HTTPException
from hetdesrun.backend.models.transformation import TransformationRevisionFrontendDto
from hetdesrun.utils import Type, State
from hetdesrun.persistence.dbservice.revision import (
read_single_transformation_revision,
store_single_transformation_revision,
select_multiple_transformation_revisions,
update_or_create_single_transformation_revision,
)
from hetdesrun.persistence.dbservice.exceptions import DBNotFoundError, DBIntegrityError
from hetdesrun.backend.service.component_router import generate_code
logger = logging.getLogger(__name__)
base_item_router = APIRouter(
prefix="/base-items",
tags=["base items"],
responses={
status.HTTP_401_UNAUTHORIZED: {"description": "Unauthorized"},
status.HTTP_403_FORBIDDEN: {"description": "Forbidden"},
status.HTTP_404_NOT_FOUND: {"description": "Not Found"},
status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Internal server error"},
},
)
@base_item_router.get(
"/",
response_model=List[TransformationRevisionFrontendDto],
response_model_exclude_unset=True, # needed because:
# frontend handles attributes with value null in a different way than missing attributes
summary="Returns combined list of all base items (components and workflows)",
status_code=status.HTTP_200_OK,
responses={status.HTTP_200_OK: {"description": "Successfully got all base items"}},
deprecated=True,
)
async def get_all_transformation_revisions() -> List[TransformationRevisionFrontendDto]:
"""Get all transformation revisions without their content from the data base.
This endpoint is deprecated and will be removed soon,
use GET /api/transformations/ instead
"""
logger.info("get all transformation revisions")
try:
transformation_revision_list = select_multiple_transformation_revisions()
except DBIntegrityError as e:
raise HTTPException(
status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"At least one entry in the DB is no valid transformation revision:\n{str(e)}",
) from e
transformation_revision_dto_list = [
TransformationRevisionFrontendDto.from_transformation_revision(tr)
for tr in transformation_revision_list
]
return transformation_revision_dto_list
@base_item_router.get(
"/{id}",
response_model=TransformationRevisionFrontendDto,
response_model_exclude_unset=True, # needed because:
# frontend handles attributes with value null in a different way than missing attributes
summary="Returns the base item with the given id.",
status_code=status.HTTP_200_OK,
responses={status.HTTP_200_OK: {"description": "Successfully got the base item"}},
deprecated=True,
)
async def get_transformation_revision_by_id(
# pylint: disable=W0622
id: UUID = Path(
...,
example=UUID("123e4567-e89b-12d3-a456-426614174000"),
),
) -> TransformationRevisionFrontendDto:
"""Get a single transformation revision without its content from the data base by its id.
This endpoint is deprecated and will be removed soon,
use GET /api/transformations/{id} instead.
"""
logger.info("get base item %s", id)
try:
transformation_revision = read_single_transformation_revision(id)
logger.info("found transformation revision with id %s", id)
except DBNotFoundError as e:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(e)) from e
transformation_revision_dto = (
TransformationRevisionFrontendDto.from_transformation_revision(
transformation_revision
)
)
logger.debug(transformation_revision_dto.json())
return transformation_revision_dto
@base_item_router.post(
"/",
response_model=TransformationRevisionFrontendDto,
response_model_exclude_unset=True, # needed because:
# frontend handles attributes with value null in a different way than missing attributes
summary="Creates a new item.",
status_code=status.HTTP_201_CREATED,
responses={
status.HTTP_201_CREATED: {"description": "Successfully created the item"}
},
deprecated=True,
)
async def create_transformation_revision(
transformation_revision_dto: TransformationRevisionFrontendDto,
) -> TransformationRevisionFrontendDto:
"""Store a transformation revision without content in the data base.
This endpoint is deprecated and will be removed soon,
use POST /api/transformations/ instead.
"""
logger.info("create base item %s", transformation_revision_dto.id)
transformation_revision = transformation_revision_dto.to_transformation_revision(
documentation=(
"\n"
"# New Component/Workflow\n"
"## Description\n"
"## Inputs\n"
"## Outputs\n"
"## Details\n"
"## Examples\n"
)
)
if transformation_revision.type == Type.COMPONENT:
logger.debug("transformation revision has type %s", Type.COMPONENT)
transformation_revision.content = generate_code(
transformation_revision.to_code_body()
)
logger.debug("generated code:\n%s", transformation_revision.content)
try:
store_single_transformation_revision(transformation_revision)
logger.info("created base item")
except DBIntegrityError as e:
raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) from e
try:
persisted_transformation_revision = read_single_transformation_revision(
transformation_revision.id
)
except DBNotFoundError as e:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(e)) from e
persisted_transformation_dto = (
TransformationRevisionFrontendDto.from_transformation_revision(
persisted_transformation_revision
)
)
logger.debug(persisted_transformation_dto.json())
return persisted_transformation_dto
@base_item_router.put(
"/{id}",
response_model=TransformationRevisionFrontendDto,
response_model_exclude_unset=True, # needed because:
# frontend handles attributes with value null in a different way than missing attributes
summary="Updates basic attributes of a component or workflow.",
status_code=status.HTTP_201_CREATED,
responses={
status.HTTP_201_CREATED: {"description": "Successfully updated the item"}
},
deprecated=True,
)
async def update_transformation_revision(
# pylint: disable=W0622
id: UUID,
updated_transformation_revision_dto: TransformationRevisionFrontendDto,
) -> TransformationRevisionFrontendDto:
"""Update or store a transformation revision except for its content in the data base.
If no DB entry with the provided id is found, it will be created.
Updating a transformation revision is only possible if it is in state DRAFT
or to change the state from RELEASED to DISABLED.
This endpoint is deprecated and will be removed soon,
use PUT /api/transformations/{id} instead.
"""
logger.info("update base item %s", id)
if id != updated_transformation_revision_dto.id:
msg = (
f"The id {id} does not match "
f"the id of the provided base item DTO {updated_transformation_revision_dto.id}"
)
logger.error(msg)
raise HTTPException(status.HTTP_403_FORBIDDEN, detail=msg)
updated_transformation_revision = (
updated_transformation_revision_dto.to_transformation_revision()
)
try:
existing_transformation_revision = read_single_transformation_revision(id)
if (
existing_transformation_revision.type
!= updated_transformation_revision_dto.type
):
msg = (
f"The type ({updated_transformation_revision_dto.type}) "
f"of the provided transformation revision does not\n"
f"match the type ({existing_transformation_revision.type}) "
f"of the stored transformation revision {id}!"
)
logger.error(msg)
raise HTTPException(status.HTTP_403_FORBIDDEN, detail=msg)
updated_transformation_revision.content = (
existing_transformation_revision.content
)
if existing_transformation_revision.type == Type.COMPONENT:
updated_transformation_revision.content = (
updated_transformation_revision.content
) = generate_code(updated_transformation_revision.to_code_body())
updated_transformation_revision.documentation = (
existing_transformation_revision.documentation
)
if existing_transformation_revision.state == State.RELEASED:
if updated_transformation_revision_dto.state == State.DISABLED:
logger.info("deprecate transformation revision %s", id)
updated_transformation_revision = existing_transformation_revision
updated_transformation_revision.deprecate()
else:
msg = f"cannot modify released component {id}"
logger.error(msg)
raise HTTPException(status.HTTP_403_FORBIDDEN, detail=msg)
except DBNotFoundError:
# base/example workflow deployment needs to be able to put
# with an id and either create or update the component revision
pass
try:
persisted_transformation_revision = (
update_or_create_single_transformation_revision(
updated_transformation_revision
)
)
logger.info("updated base item %s", id)
except DBIntegrityError as e:
raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) from e
except DBNotFoundError as e:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(e)) from e
persisted_transformation_dto = (
TransformationRevisionFrontendDto.from_transformation_revision(
persisted_transformation_revision
)
)
logger.debug(persisted_transformation_dto.json())
return persisted_transformation_dto
| 36.5 | 98 | 0.711746 |
acdff3fdaba67bacc2874c2068819e6e7b3e095e | 1,038 | py | Python | models/loss.py | chaytonmin/Off-Road-Freespace-Detection-Dataset-and-Benchmark | 3a2ac05bb9eb62cbb6fe31e3d09ed053482f0de8 | [
"MIT"
] | 3 | 2021-10-15T08:59:13.000Z | 2022-02-05T17:15:58.000Z | models/loss.py | chaytonmin/Off-Road-Freespace-Detection-Dataset-and-Benchmark | 3a2ac05bb9eb62cbb6fe31e3d09ed053482f0de8 | [
"MIT"
] | 1 | 2021-11-11T09:31:42.000Z | 2021-11-12T16:03:56.000Z | models/loss.py | chaytonmin/Off-Road-Freespace-Detection-Dataset-and-Benchmark | 3a2ac05bb9eb62cbb6fe31e3d09ed053482f0de8 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import init
import torchvision
from torch.optim import lr_scheduler
import torch.nn.functional as F
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
lambda_rule = lambda epoch: opt.lr_gamma ** ((epoch+1) // opt.lr_decay_epochs)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer,step_size=opt.lr_decay_iters, gamma=0.1)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
class SegmantationLoss(nn.Module):
def __init__(self, class_weights=None):
super(SegmantationLoss, self).__init__()
self.loss = nn.CrossEntropyLoss(weight=class_weights)
def __call__(self, output, target, pixel_average=True):
if pixel_average:
return self.loss(output, target) #/ target.data.sum()
else:
return self.loss(output, target)
| 38.444444 | 97 | 0.707129 |
acdff45407318d8d58edd5408b9e454df5e9ec6c | 8,946 | py | Python | src/data.py | CedricFauth/dolist-client | 67ab15388e732c61639c9abeec5c42d6b07e19ff | [
"MIT"
] | 17 | 2020-11-07T01:18:09.000Z | 2021-08-21T22:18:33.000Z | src/data.py | CedricFauth/dolist-client | 67ab15388e732c61639c9abeec5c42d6b07e19ff | [
"MIT"
] | 1 | 2020-11-07T15:38:51.000Z | 2020-11-07T15:39:02.000Z | src/data.py | CedricFauth/dolist-client | 67ab15388e732c61639c9abeec5c42d6b07e19ff | [
"MIT"
] | 2 | 2020-11-10T15:43:09.000Z | 2020-11-11T06:54:13.000Z | '''
MIT License
Copyright (c) 2020 Cedric Fauth
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import re
import logging
from cli import Output as O
from datetime import datetime, date, timedelta
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
class Dataparser():
days_to_int = {"mon" : 0, "tue" : 1, "wed" : 2, "thu" : 3,
"fri" : 4, "sat" : 5, "sun" : 6 }
@staticmethod
def validate(args):
'''
validates input values of the user
'''
# only need to validate if cmd is event or task
if args.cmd == 'event' or args.cmd == 'task':
# -t -d -f are available but -d is optional
# try to match -f w/o/d
if not re.match('^(w|weekly|d|daily|o|once)$', args.f):
O.error(f'wrong frequency format: {args.f}')
return False
# if daily: no date/day set
if args.f[0] == 'd':
if args.d != None:
O.error(f'you cannot use -d here because the {args.cmd} is daily.')
return False
# if once: date YYYY-MM-DD needs to be set
elif args.f[0] == 'o':
if not args.d or not re.match('^((\d\d\d\d)-(0[1-9]|1[0-2])-(0[1-9]|(1|2)[0-9]|3[0-1]))$', args.d):
O.error(f'wrong date format: {args.d}')
return False
# if weekly: day needs to be set
else:
if not args.d or not re.match('^(mon|tue|wed|thu|fri|sat|sun)$', args.d):
O.error(f'wrong day format: {args.d}')
return False
# if event try to match HH:MM-HH:MM
if args.cmd == 'event':
if not re.match('^([0-1][0-9]|2[0-3]):[0-5][0-9]-([0-1][0-9]|2[0-3]):[0-5][0-9]$', args.t):
O.error(f'wrong time format: {args.t}')
return False
# if event try to match HH:MM
else:
if not re.match('^([0-1][0-9]|2[0-3]):[0-5][0-9]$', args.t):
O.error(f'wrong time format: {args.t}')
return False
return True
@staticmethod
def parse(c, title, day_date, time, freq):
'''
weekly event data gets prepared for database
'''
f = freq[0]
day = None
date = None
if f == 'o':
date = day_date
elif f == 'w':
day = Dataparser.days_to_int[day_date]
if c =='e':
t = time.split('-')
return (title, day ,t[0], t[1], f, date)
else:
tmp = (0, title, day, time, f, date, )
previous_date = Dataparser.last_deadline(tmp)
if previous_date:
return (title, day, time, f, date, previous_date.strftime('%Y-%m-%d %H:%M'))
else:
return (title, day, time, f, date)
@staticmethod
def nearest_deadline(task):
freq = task[4]
# date now
dt = datetime.now()
date_str = dt.date().isoformat()
# time now
current_time_str = dt.strftime("%H:%M")
current_datetime = datetime.fromisoformat(f'{date_str} {current_time_str}')
deadline_datetime = datetime.fromisoformat(f'{date_str} {task[3]}')
if freq == 'w':
while 1:
if deadline_datetime.weekday() == task[2]:
if deadline_datetime > current_datetime:
break
deadline_datetime += timedelta(1)
# check if last deadline was missed
if task[7]:
last_done = datetime.fromisoformat(task[7])
if deadline_datetime - last_done > timedelta(7):
deadline_datetime -= timedelta(7)
elif freq == 'd':
if deadline_datetime <= current_datetime:
deadline_datetime += timedelta(1)
# check if last deadline was missed
if task[7]:
last_done = datetime.fromisoformat(task[7])
if deadline_datetime - last_done > timedelta(1):
deadline_datetime -= timedelta(1)
else:
deadline_datetime = datetime.fromisoformat(f'{task[5]} {task[3]}')
logger.debug(f'nearest_deadline {deadline_datetime} for {task}')
return deadline_datetime
@staticmethod
def last_deadline(task):
freq = task[4]
# date now
dt = datetime.now()
date_str = dt.date().isoformat()
# time now
current_time_str = dt.strftime("%H:%M")
current_datetime = datetime.fromisoformat(f'{date_str} {current_time_str}')
deadline_datetime = datetime.fromisoformat(f'{date_str} {task[3]}')
if freq == 'w':
while 1:
if deadline_datetime.weekday() == task[2]:
if deadline_datetime < current_datetime:
break
deadline_datetime -= timedelta(1)
elif freq == 'd':
if deadline_datetime > current_datetime:
deadline_datetime -= timedelta(1)
else:
deadline_datetime = None
logger.debug(f'last_deadline {deadline_datetime} for {task}')
return deadline_datetime
@staticmethod
def delta_to_tupel(tdelta):
hours, rem = divmod(tdelta.seconds, 3600)
minutes = rem // 60 # + 1
return (tdelta.days, hours, minutes, )
@staticmethod
def prepare_out_events(events):
"""
creates list of events with additional attibs like 'time left'
"""
event_list = []
daytime = datetime.today()
day = date.today()
for e in events:
start_time = datetime.fromisoformat(f'{day.isoformat()} {e[3]}')
end_time = datetime.fromisoformat(f'{day.isoformat()} {e[4]}')
dstart = start_time - daytime
dend = end_time - daytime
dstart = Dataparser.delta_to_tupel(dstart)
if dend.days >= 0:
event_list.append(e+dstart+(True, ))
else:
event_list.append(e+dstart+(False, ))
return event_list
@staticmethod
def prepare_out_tasks(tasks):
"""
creates list of tasks with additional attibs like 'time left'
"""
task_list = []
daytime = datetime.today()
day = date.today()
for t in tasks:
left = None
# info: stays missed until new day begins
deadline_datetime = Dataparser.nearest_deadline(t)
logger.debug(deadline_datetime)
left = deadline_datetime - daytime
logger.debug(f'left{left}')
task_list.append(t + Dataparser.delta_to_tupel(left))
def time_left_to_str(x):
#if x[8] < 0:
# y = int(f'{x[8]}{((x[9]*(-1) + 24) % 24):02}{x[10]:02}')
#else:
y = int(f'{x[8]}{x[9]:02}{x[10]:02}')
return y
return sorted(task_list, key=time_left_to_str)
@staticmethod
def get_reset_ids(tasks_done):
# TODO maybe wrong because of many changes
task_ids = []
daytime = datetime.today()
day = date.today()
for t in tasks_done:
left = None
if t[7] != None:
task_time = datetime.fromisoformat(t[7])
left = task_time - daytime
if left.days < 0:
task_ids.append((t[0],t[4], ))
#elif t[4] == 'o' and t[7] != None:
# task_ids.append((t[0],t[4], ))
# check if last-done is before last due: reset
#if datetime.fromisoformat(t[7]) == task_time:
# logger.info(f'{t[1]} can be deleted')
# task_ids.append(t[0])
return task_ids
| 36.663934 | 115 | 0.544825 |
acdff45e0a4cba402c6d23ea47a72ef784399a1f | 1,068 | py | Python | setup.py | AgrinessEdgeIoT/portiapy | efc5e4af2d94c57aa03b447ee015755532baaf70 | [
"MIT"
] | 1 | 2019-05-22T18:38:01.000Z | 2019-05-22T18:38:01.000Z | setup.py | AgrinessEdgeIoT/portiapy | efc5e4af2d94c57aa03b447ee015755532baaf70 | [
"MIT"
] | 4 | 2018-09-17T13:16:18.000Z | 2021-02-26T13:30:49.000Z | setup.py | AgrinessEdgeIoT/portiapy | efc5e4af2d94c57aa03b447ee015755532baaf70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from os.path import join
from setuptools import setup, find_packages
MODULE_NAME = 'portiapy'
REPO_NAME = 'portiapy'
with open('./README.md') as f:
readme = f.read()
setup(
name=MODULE_NAME,
description='A small package for handling Agriness Edge\'s REST API',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/AgrinessEdgeIoT/{:s}'.format(REPO_NAME),
author='Matheus Mota, Lucas Góes',
author_email='matheus@agrinessedge.com, lucas@agrinessedge.com',
packages=find_packages(exclude=('binder', 'tests', 'docs')),
version=open(join(MODULE_NAME, 'VERSION')).read().strip(),
install_requires=['requests>=2.20.0', 'pytz>=2018.5',
'python-dateutil>=2.7.3', 'plotly>=3.1.1',
'pandas>=0.23.4', 'arrow>=0.12.1'],
classifiers=(
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
),
include_package_data=True
)
| 30.514286 | 73 | 0.645131 |
acdff4afb10166a14d2e468db690ee04bb0eae10 | 297 | py | Python | 101 - 115/utilidadescev/dados/__init__.py | SocrammBR/Desafios-Python-CursoEmVideo | bd2454a24134500343ece91b936c169d3a66f89e | [
"MIT"
] | null | null | null | 101 - 115/utilidadescev/dados/__init__.py | SocrammBR/Desafios-Python-CursoEmVideo | bd2454a24134500343ece91b936c169d3a66f89e | [
"MIT"
] | null | null | null | 101 - 115/utilidadescev/dados/__init__.py | SocrammBR/Desafios-Python-CursoEmVideo | bd2454a24134500343ece91b936c169d3a66f89e | [
"MIT"
] | null | null | null | import colorama
colorama.init(autoreset=True)
def leiaDinheiro(num):
num = num.replace(',', '.')
n = num.replace('.', '')
while n.isnumeric() == False:
print('\033[31mERRO! Digite um valor válido')
num = input('Digite um valor: ')
n = num.replace('.', '')
return int(num)
| 21.214286 | 49 | 0.602694 |
acdff51b65f5c5bcaa0ccdbabe1c3d2bfca868fb | 1,346 | py | Python | ox/loader.py | praemandatum/contact-sync | 96afe27d56e686b160fea67162a2ff56f6d0a6a8 | [
"Apache-2.0"
] | 1 | 2015-10-25T08:15:53.000Z | 2015-10-25T08:15:53.000Z | ox/loader.py | praemandatum/contact-sync | 96afe27d56e686b160fea67162a2ff56f6d0a6a8 | [
"Apache-2.0"
] | 1 | 2015-11-17T13:45:42.000Z | 2015-11-21T16:28:57.000Z | ox/loader.py | praemandatum/contact-sync | 96afe27d56e686b160fea67162a2ff56f6d0a6a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import requests
import json
from urlparse import urljoin
import response
class OXContactLoader(object):
def __init__(self, session, folder, columns):
self.__session = session
self.__folder = folder
self.__columns = columns
def loadUpdates(self, since):
"""Load contacts that changed since given timestamp."""
payload = self.__build_request("updates")
payload["timestamp"] = since
raw = self.__request(payload)
return response.UpdatesResponse.parse(raw, self.__columns)
def load(self, since):
"""Load all contacts."""
payload = self.__build_request("all")
res = self.__request(payload)
return res.get("data")
def __build_request(self, action):
return {
'session': self.__session.token,
'folder': self.__folder,
'columns': ",".join([str(c) for c in self.__columns]),
"action": action
}
def __request(self, payload):
r = requests.get(urljoin(self.__session.baseUrl, "/ajax/contacts"), params=payload,
cookies=self.__session.cookie)
if r.status_code != 200:
raise Exception("Could not get OX contacts!\n" + r.text)
return json.loads(r.text)
| 26.392157 | 91 | 0.603269 |
acdff5850d264a020c1f3c55712e95cab31c511b | 176 | py | Python | demos/__init__.py | kaustubhgupta/PyWebIO | adea34e91643616afc7aa759433a2c820798a9ba | [
"MIT"
] | 1 | 2021-11-30T08:52:32.000Z | 2021-11-30T08:52:32.000Z | demos/__init__.py | kaustubhgupta/PyWebIO | adea34e91643616afc7aa759433a2c820798a9ba | [
"MIT"
] | null | null | null | demos/__init__.py | kaustubhgupta/PyWebIO | adea34e91643616afc7aa759433a2c820798a9ba | [
"MIT"
] | 1 | 2021-05-26T05:17:09.000Z | 2021-05-26T05:17:09.000Z | r"""
.. automodule:: demos.bmi
.. automodule:: demos.input_usage
.. automodule:: demos.output_usage
.. automodule:: demos.chat_room
.. automodule:: demos.markdown_previewer
""" | 25.142857 | 40 | 0.732955 |
acdff5d29150f42a755cf4dbb0917ed2842c6a25 | 803 | py | Python | django/contrib/gis/db/models/sql/conversion.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 3 | 2016-07-08T23:49:32.000Z | 2018-04-15T22:55:01.000Z | django/contrib/gis/db/models/sql/conversion.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 27 | 2017-02-05T15:57:04.000Z | 2018-04-15T22:57:26.000Z | django/contrib/gis/db/models/sql/conversion.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | null | null | null | """
This module holds simple classes used by GeoQuery.convert_values
to convert geospatial values from the database.
"""
class BaseField(object):
empty_strings_allowed = True
def get_internal_type(self):
"Overloaded method so OracleQuery.convert_values doesn't balk."
return None
class AreaField(BaseField):
"Wrapper for Area values."
def __init__(self, area_att):
self.area_att = area_att
class DistanceField(BaseField):
"Wrapper for Distance values."
def __init__(self, distance_att):
self.distance_att = distance_att
class GeomField(BaseField):
"""
Wrapper for Geometry values. It is a lightweight alternative to
using GeometryField (which requires a SQL query upon instantiation).
"""
pass
| 28.678571 | 73 | 0.693649 |
acdff7a1fb43e7bc60b76f8205e461b4f332817f | 1,282 | py | Python | src/sentry/status_checks/celery_alive.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 4 | 2016-03-16T07:21:36.000Z | 2017-09-04T07:29:56.000Z | src/sentry/status_checks/celery_alive.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | src/sentry/status_checks/celery_alive.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 1 | 2017-04-08T04:09:18.000Z | 2017-04-08T04:09:18.000Z | from __future__ import absolute_import
from time import time
from django.conf import settings
from django.core.urlresolvers import reverse
from sentry import options
from sentry.utils.http import absolute_uri
from .base import Problem, StatusCheck
class CeleryAliveCheck(StatusCheck):
    """Status check that reports a Problem when Celery workers look dead.

    Healthy when a worker pinged within the last 5 minutes, or when
    CELERY_ALWAYS_EAGER is set (tasks run inline, no workers exist).
    """

    def check(self):
        # There is no queue, and celery is not running, so never show error
        if settings.CELERY_ALWAYS_EAGER:
            return []
        # Timestamp of the last worker heartbeat; 0 when never recorded.
        last_ping = options.get('sentry:last_worker_ping') or 0
        # A ping within the last 300 seconds counts as alive.
        if last_ping >= time() - 300:
            return []
        backlogged, size = None, 0
        # Imported lazily -- the queue-monitoring backend is optional.
        from sentry.monitoring.queues import backend
        if backend is not None:
            size = backend.get_size('default')
            backlogged = size > 0
        message = "Background workers haven't checked in recently. "
        if backlogged:
            message += "It seems that you have a backlog of %d tasks. Either your workers aren't running or you need more capacity." % size
        else:
            message += "This is likely an issue with your configuration or the workers aren't running."
        return [
            Problem(
                message,
                url=absolute_uri(reverse('sentry-admin-queue')),
            ),
        ]
| 31.268293 | 139 | 0.634945 |
acdff81f1b70d19889c0fbc95f2fd07f4ae62902 | 5,203 | py | Python | sisyphus/util/concurrent/futures/thread.py | libfirm/sisyphus | 107419fd895c929423a58c1c47d3245cd2e25fed | [
"MIT"
] | 5 | 2017-03-15T04:45:56.000Z | 2020-09-09T23:23:59.000Z | sisyphus/util/concurrent/futures/thread.py | PaulBone/sisyphus | b30cf61707ced65937c843c3adcf9761b2704413 | [
"MIT"
] | 1 | 2020-09-09T23:33:27.000Z | 2020-09-09T23:33:27.000Z | sisyphus/util/concurrent/futures/thread.py | PaulBone/sisyphus | b30cf61707ced65937c843c3adcf9761b2704413 | [
"MIT"
] | 2 | 2018-02-07T17:59:53.000Z | 2020-09-09T23:24:20.000Z | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
from __future__ import with_statement
import atexit
import threading
import weakref
import sys
import base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_thread_references = set()
_shutdown = False
def _remove_dead_thread_references():
    """Remove inactive threads from _thread_references.
    Should be called periodically to prevent memory leaks in scenarios such as:
    >>> while True:
    ...     t = ThreadPoolExecutor(max_workers=5)
    ...     t.map(int, ['1', '2', '3', '4', '5'])
    """
    # Iterate over a snapshot so we can discard from the live set safely.
    for thread_reference in set(_thread_references):
        if thread_reference() is None:
            # Dead weakref: the Thread object has been garbage-collected.
            _thread_references.discard(thread_reference)
class _WorkItem(object):
    """A queued unit of work: a Future paired with the callable that
    produces its result."""

    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        # Skip work that was cancelled before a worker picked it up.
        if not self.future.set_running_or_notify_cancel():
            return
        id = "%s(%s,%s)" % (self.fn, self.args, self.kwargs)
        base.LOGGER.debug('run WorkItem: '+id)
        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException:
            # sys.exc_info() instead of 'except ... as e' keeps this
            # compatible with both Python 2 and 3 exception syntax.
            e = sys.exc_info()[1]
            self.future.set_exception(e)
            base.LOGGER.debug('WorkItem(%s) finished with exception: %s' % (id, e))
        else:
            self.future.set_result(result)
            base.LOGGER.debug('WorkItem(%s) finished with result: %s' % (id, result))
def _worker(executor_reference, work_queue):
    """Worker-thread loop: pull and run work items until shutdown.

    The executor is held only through a weakref so idle workers do not
    keep a collected executor alive.
    """
    try:
        while True:
            try:
                # Poll with a timeout so the exit conditions below are
                # re-evaluated periodically even when the queue stays empty.
                work_item = work_queue.get(block=True, timeout=0.1)
            except queue.Empty:
                executor = executor_reference()
                # Exit if:
                #   - The interpreter is shutting down OR
                #   - The executor that owns the worker has been collected OR
                #   - The executor that owns the worker has been shutdown.
                if _shutdown or executor is None or executor._shutdown:
                    return
                # Drop the strong reference before blocking again.
                del executor
            else:
                work_item.run()
    except BaseException:
        base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(base.Executor):
    """Executor that runs submitted callables on a pool of daemon threads."""

    def __init__(self, max_workers):
        """Initializes a new ThreadPoolExecutor instance.
        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
        """
        # Purge weakrefs left behind by previously collected executors.
        _remove_dead_thread_references()
        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        base.LOGGER.debug('ThreadPoolExecutor with %d workers max' % max_workers)

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                msg = 'cannot schedule new futures after shutdown'
                base.LOGGER.error(msg)
                raise RuntimeError(msg)
            f = base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            # Grow the pool lazily, one thread per submit, up to the cap.
            self._adjust_thread_count()
            return f
    submit.__doc__ = base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        if len(self._threads) < self._max_workers:
            # The worker only holds the executor through a weakref so the
            # executor can be collected while threads are idle.
            t = threading.Thread(target=_worker,
                                 args=(weakref.ref(self), self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _thread_references.add(weakref.ref(t))
            base.LOGGER.info('ThreadPoolExecutor spawned a new thread, now at %d' % len(self._threads))

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
        if wait:
            base.LOGGER.debug('ThreadPoolExecutor shutting down, waiting for threads')
            for t in self._threads:
                t.join()
            base.LOGGER.info('ThreadPoolExecutor shut down')
    shutdown.__doc__ = base.Executor.shutdown.__doc__
| 35.882759 | 103 | 0.628868 |
acdff8a0d188c1a2dfe5bd150ec90b9af0beb585 | 8,668 | py | Python | dlp/synth.py | cclauss/google-cloud-python | 90f3d0b5304fd8aef9cedc35d9323d257b5a36fb | [
"Apache-2.0"
] | 1 | 2021-06-30T11:43:47.000Z | 2021-06-30T11:43:47.000Z | dlp/synth.py | cclauss/google-cloud-python | 90f3d0b5304fd8aef9cedc35d9323d257b5a36fb | [
"Apache-2.0"
] | null | null | null | dlp/synth.py | cclauss/google-cloud-python | 90f3d0b5304fd8aef9cedc35d9323d257b5a36fb | [
"Apache-2.0"
] | 1 | 2021-06-30T11:44:03.000Z | 2021-06-30T11:44:03.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
# Generate the DLP v2 client library with the GAPIC generator, then copy it
# into the repository (excluding files maintained by hand).
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
client_library_version = "0.7.0"
library = gapic.py_library(
    "dlp", "v2", config_path="/google/privacy/dlp/artman_dlp_v2.yaml"
)
s.copy(library, excludes=["README.rst", "nox.py"])
# Set Release Status
release_status = "Development Status :: 3 - Alpha"
s.replace("setup.py", "(release_status = )(.*)$", f"\\1'{release_status}'")
# Set version
s.replace("setup.py", "version = .*", f"version = '{client_library_version}'")
# Fix namespace
s.replace(
"**/*.py",
"google\.cloud\.privacy\.dlp_v2",
"google.cloud.dlp_v2"
)
# Add changelog to index.rst
s.replace(
"docs/index.rst",
" gapic/v2/types",
" gapic/v2/types\n changelog\n"
)
# Add newlines to end of files
s.replace(
["google/__init__.py", "google/cloud/__init__.py"],
"__path__ = pkgutil.extend_path\(__path__, __name__\)",
"\g<0>\n"
)
# Add missing utf-8 marker
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"# Generated by the protocol buffer compiler. DO NOT EDIT!",
"# -*- coding: utf-8 -*-\n\g<0>"
)
# Fix unindentation of bullet list second line
s.replace(
"google/cloud/dlp_v2/gapic/dlp_service_client.py",
"( \* .*\n )([^\s*])",
"\g<1> \g<2>"
)
s.replace(
"google/cloud/dlp_v2/gapic/dlp_service_client.py",
"(\s+)\*.*\n\s+::\n\n",
"\g<1> ::\n"
)
# Fix raw-latex bits in storage_pb2.py
s.replace(
"google/cloud/dlp_v2/proto/storage_pb2.py",
"number regex.*\n(\s+)latex:.*\n",
"number regex \"(\\d{3}) \\d{3}-\\d{4} \"\\\n"
"\g<1>could be adjusted upwards if the area code is \\\n"
)
# Fix Docstrings in google/cloud/dlp_v2/proto/storage_pb2.py
s.replace(
"google/cloud/dlp_v2/proto/storage_pb2.py",
"(hotword_regex:)\n(\s+Regular expression.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/storage_pb2.py",
"(proximity:)\n(\s+Proximity.*)\n(\s+reside..*)\n(\s+characters..*)\n"
"(\s+the window.*)\n(\s+of the finding.*)\n(\s+number regex.*)\n"
"(\s+latex:.*)\n(\s+known to be the local.*)\n(\s+hotword regex.*)\n",
"\g<1> \\\n\g<2> \\\n\g<3> \\\n\g<4> \\\n\g<5> \\\n\g<6> \\\n\g<7> "
"\\\n\g<8> \\\n\g<9> \\\n\g<10> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/storage_pb2.py",
"(likelihood_adjustment:)\n",
"\g<1> \\\n"
)
# Fix Docstrings in google/cloud/dlp_v2/proto/dlp_pb2.py
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(max_findings_per_item:)\n(\s+Max number.*)\n(\s+scanned. When.*)\n"
"(\s+maximum returned is 1000.*)\n(\s+When set within.*)\n",
"\g<1> \\\n\g<2> \\\n\g<3> \\\n\g<4> \\\n\g<5> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(max_findings_per_request:)\n(\s+Max number of.*)\n(\s+When set .*)\n",
"\g<1> \\\n\g<2> \\\n\g<3> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(max_findings_per_info_type:)\n",
"\g<1> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(snapshot_inspect_template:)\n(\s+If run with an .*)\n",
"\g<1> \\\n\g<2> \\\n"
)
to_replace = [
"processed_bytes:", "total_estimated_bytes:", "info_type_stats:",
"Statistics of how many instances of each info type were found",
"requested_options:",
]
for replace in to_replace:
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
f"({replace})\n",
"\g<1> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(sensitive_value_frequency_lower_bound:)\n(\s+Lower bound.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(sensitive_value_frequency_upper_bound:)\n(\s+Upper bound.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(bucket_size:)\n(\s+Total number of equivalence.*)\n",
"\g<1> \\\n\g<2>\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(bucket_values:)\n(\s+Sample of equivalence.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(offset_minutes:)\n(\s+Set only.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(result:)\n(\s+A summary of the outcome of this inspect job.)",
"\g<1> \\\n\g<2>"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(storage_config:)\n(\s+The data to scan.\n)",
"\g<1> \\\n\g<2>"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(inspect_config:)\n(\s+How and what to scan for.\n)",
"\g<1> \\\n\g<2>"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(inspect_template_name:)\n(\s+If provided, will be.*)\n"
"(\s+InspectConfig.*)\n(\s+values persisted.*)\n(\s+actions:)\n"
"(\s+Actions to.*)\n",
"\g<1> \\\n\g<2> \\\n\g<3> \\\n\g<4> \\\n\g<5> \\\n\g<6> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
" (\s+Set of values defining the equivalence class.*)\n"
" (\s+quasi-identifier.*)\n"
" (\s+message. The order.*)\n",
"\g<1> \\\n\g<2> \\\n\g<3>\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
" (\s+Size of the equivalence class, for example number of rows with)\n"
" (\s+the above set of values.)\n",
"\g<1> \\\n\g<2>\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(equivalence_class_size_lower_bound:)\n(\s+Lower bound.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(equivalence_class_size_upper_bound:)\n(\s+Upper bound.*)\n",
"\g<1> \\\n\g<2> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(bucket_value_count:)\n(\s+Total number of distinct equivalence.*)\n",
"\g<1> \\\n\g<2>\n"
)
# Docstrings from categorical histogram bucket
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(value_frequency_lower_bound:)\n\s+(Lower bound.*)\n\s+(bucket.\n)"
"(\s+value_frequency_upper.*)\n\s+(Upper.*)\n\s+(bucket.\n)"
"(\s+bucket_size:)\n\s+(Total.*\n)"
"(\s+bucket_values:)\n\s+(Sample of value.*)\n\s+(of values.*\n)"
"(\s+bucket_value_count:)\n\s+(Total number.*\n)",
"\g<1> \g<2> \g<3>\g<4> \g<5> \g<6>\g<7> \g<8>"
"\g<9> \g<10> \g<11>\g<12> \g<13>"
)
# Fix docstrings tagged field indentation issues in dlp_pb2.py
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(DESCRIPTOR .*_TAGGEDFIELD,\n\s+__module__.*\n\s+,\n\s+__doc__.*\n)"
"(\s+field:)\n(\s+Identifies .*)\n(\s+tag:)\n(\s+Semantic.*)\n"
"(\s+determine.*)\n(\s+reidentifiability.*)\n(\s+info_type:)\n"
"(\s+A column.*)\n(\s+public dataset.*)\n(\s+available.*)\n(\s+ages.*)\n"
"(\s+supported Info.*)\n(\s+supported.*)\n(\s+custom_tag:)\n(\s+A col.*)\n"
"(\s+user must.*)\n(\s+statist.*)\n(\s+\(below.*)\n(\s+inferred:)\n"
"(\s+If no semantic.*)\n",
"\g<1>\g<2> \\\n\g<3>\n\g<4> \\\n\g<5> \\\n\g<6> \\\n"
"\g<7> \\\n\g<8> \\\n\g<9> \\\n\g<10> \\\n\g<11> \\\n\g<12> \\\n"
"\g<13> \\\n\g<14>\n\g<15> \\\n\g<16> \\\n\g<17> \\\n\g<18> \\\n"
"\g<19>\n\g<20> \\\n\g<21> \\\n"
)
s.replace(
"google/cloud/dlp_v2/proto/dlp_pb2.py",
"(////////.*)\n\s+(///////////////\n)",
"\g<1> \g<2>"
)
# Fix Docstrings in google/cloud/dlp_v2/gapic/dlp_service_client.py
s.replace(
"google/cloud/dlp_v2/gapic/dlp_service_client.py",
"(- ``CryptoReplaceFfxFpeConfig``\n)(\s+If a dict is provided.*\n)"
"(\s+message.*\n)",
"\g<1> \g<2> \g<3>"
)
s.replace(
"google/cloud/dlp_v2/gapic/dlp_service_client.py",
" ::\n (\s+- `state`.*\n) (\s+- `inspected_storage`.*\n)"
" (\s+- `trigger_name`.*\n)",
"* Supported fields/values for inspect jobs:\n\g<1>\g<2>\g<3>"
)
s.replace(
"google/cloud/dlp_v2/gapic/dlp_service_client.py",
" ::\n (\s+- `state`.*\n)(\s+\* The operator must be)",
"* Supported fields for risk analysis jobs:\n\g<1>\g<2>"
)
| 29.383051 | 79 | 0.591024 |
acdff9223393079af8db82e3502db51a81c1075d | 93 | py | Python | I'm bored with life.py | NehaAgarwal2598/Codeforces | 859196a734cb24834736adf9517b351647da6e73 | [
"MIT"
] | null | null | null | I'm bored with life.py | NehaAgarwal2598/Codeforces | 859196a734cb24834736adf9517b351647da6e73 | [
"MIT"
] | null | null | null | I'm bored with life.py | NehaAgarwal2598/Codeforces | 859196a734cb24834736adf9517b351647da6e73 | [
"MIT"
] | null | null | null | import math
# Read two integers from stdin and print the factorial of the smaller one.
a, b = map(int, input().split())
smaller = min(a, b)
print(math.factorial(smaller))
| 18.6 | 39 | 0.612903 |
acdff92d6c6dae4acc1fa99bab23d58babe96065 | 1,970 | py | Python | db/crud.py | aviloncho/mutants | 343a2d5b86d053e901bee26fb7399a30c38ef5e4 | [
"MIT"
] | null | null | null | db/crud.py | aviloncho/mutants | 343a2d5b86d053e901bee26fb7399a30c38ef5e4 | [
"MIT"
] | null | null | null | db/crud.py | aviloncho/mutants | 343a2d5b86d053e901bee26fb7399a30c38ef5e4 | [
"MIT"
] | null | null | null | from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
import db.models as models
import api.schemas as schemas
from utils import hash_utils
def get_dnas_analysis(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of all DnaAnalysis rows stored in the database.

    ``skip`` is the pagination offset and ``limit`` caps the page size.
    """
    query = db.query(models.DnaAnalysis)
    return query.offset(skip).limit(limit).all()
def get_dna_analysis(db: Session,
                     human_dna: schemas.HumanDNA):
    """Look up the stored DnaAnalysis matching a human DNA payload.

    The DNA is hashed with ``hash_utils.hash_md5`` and the hex digest is
    used as the lookup key. Returns None when no matching row exists.
    """
    _, digest = hash_utils.hash_md5(human_dna)
    return get_dna_analysis_by_hash(db, digest.hexdigest())
def get_dna_analysis_by_hash(db: Session,
                             hash: str):
    """Return the DnaAnalysis with the given digest, or None when absent.

    :param db: Open SQLAlchemy session.
    :param hash: Hex digest stored in ``DnaAnalysis.dna_hash``.
    """
    # Query.first() returns None when no row matches -- it never raises
    # NoResultFound (only Query.one() does), so the previous try/except
    # around this call was dead code. Behavior is unchanged.
    return db.query(models.DnaAnalysis).filter_by(dna_hash=hash).first()
def create_dna_analysis(db: Session,
                        human_dna: schemas.HumanDNA,
                        mutant: bool = False):
    """
    Create a new DnaAnalysis record on the database.

    :param db: Open SQLAlchemy session.
    :param human_dna: Incoming DNA payload; hashed for deduplicated lookups.
    :param mutant: Whether the analysis classified this DNA as mutant.
    :return: The persisted row, refreshed after commit.
    """
    str_dna, hash_dna = hash_utils.hash_md5(human_dna)
    db_dna = models.DnaAnalysis(
        dna_hash=hash_dna.hexdigest(),
        dna=str_dna,
        is_mutant=mutant
    )
    db.add(db_dna)
    db.commit()
    # Refresh to populate server-generated fields (e.g. the primary key).
    db.refresh(db_dna)
    return db_dna
def get_stats(db: Session):
    """
    Get HumanDNA stats from database.
    Ratio = Mutant DNA's / Total Human DNA's
    """
    total = db.query(models.DnaAnalysis).count()
    mutant = db.query(models.DnaAnalysis).filter(
        models.DnaAnalysis.is_mutant
    ).count()
    ratio = 0
    if mutant > 0 and total > 0:
        ratio = mutant / total
    # NOTE(review): str(ratio)[:3] truncates the *decimal string* to one
    # decimal place (0.666... -> 0.6). This depends on float repr; consider
    # an explicit round()/floor if one-decimal truncation is the intent.
    stats = schemas.DnaStats(
        count_human_dna=total,
        count_mutant_dna=mutant,
        ratio=float(str(ratio)[:3])
    )
    return stats
| 25.25641 | 71 | 0.642132 |
acdff9d161ce50d9566b0f339c5c47588441ca45 | 414 | py | Python | src/lib/pythonds/basic/queue.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | 2 | 2021-12-18T06:34:26.000Z | 2022-01-05T05:08:47.000Z | src/lib/pythonds/basic/queue.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | src/lib/pythonds/basic/queue.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | # Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005
#
#queue.py
class Queue:
    """FIFO queue backed by a list: rear at index 0, front at the end."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the queue contains no items."""
        return len(self.items) == 0

    def enqueue(self, item):
        """Add an item at the rear of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the item at the front of the queue."""
        return self.items.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.items)
| 18.818182 | 58 | 0.618357 |
acdffa9af2ea6b9a0b3e64239e98dc945466598b | 31,340 | py | Python | src/compas/geometry/_core/_algebra.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/geometry/_core/_algebra.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/geometry/_core/_algebra.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import sqrt
from math import fabs
__all__ = [
'close',
'allclose',
'argmin',
'argmax',
'add_vectors',
'add_vectors_xy',
'sum_vectors',
'cross_vectors',
'cross_vectors_xy',
'divide_vectors',
'divide_vectors_xy',
'dot_vectors',
'dot_vectors_xy',
'length_vector',
'length_vector_xy',
'length_vector_sqrd',
'length_vector_sqrd_xy',
'multiply_matrices',
'multiply_matrix_vector',
'multiply_vectors',
'multiply_vectors_xy',
'norm_vector',
'norm_vectors',
'normalize_vector',
'normalize_vector_xy',
'normalize_vectors',
'normalize_vectors_xy',
'homogenize_vectors',
'dehomogenize_vectors',
'orthonormalize_vectors',
'power_vector',
'power_vectors',
'scale_vector',
'scale_vector_xy',
'scale_vectors',
'scale_vectors_xy',
'square_vector',
'square_vectors',
'subtract_vectors',
'subtract_vectors_xy',
'transpose_matrix',
'vector_component',
'vector_component_xy',
'vector_average',
'vector_variance',
'vector_standard_deviation',
]
def vector_average(vector):
    """Arithmetic mean of the values in a vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        List of values.

    Returns
    -------
    float
        The mean value.

    """
    total = sum(vector)
    return total / float(len(vector))
def vector_variance(vector):
    """Population variance of the values in a vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        List of values.

    Returns
    -------
    float
        The variance value.

    Notes
    -----
    The previous implementation raised the result to the power 0.5, which
    yields the standard deviation, not the variance as documented (and made
    ``vector_standard_deviation`` return variance ** 0.25).

    """
    n = float(len(vector))
    mean = sum(vector) / n
    # Population variance: mean of the squared deviations from the mean.
    return sum((value - mean) ** 2 for value in vector) / n
def vector_standard_deviation(vector):
    """Standard deviation of the values in a vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        List of values.

    Returns
    -------
    float
        The standard deviation value, computed as the square root of
        whatever ``vector_variance`` returns.

    """
    variance = vector_variance(vector)
    return variance ** 0.5
def close(value1, value2, tol=1e-05):
    """Check whether two values are equal within a tolerance.

    Parameters
    ----------
    value1 : float or int
    value2 : float or int
    tol : float, optional
        The tolerance for comparing values.

    Returns
    -------
    bool
        True if the absolute difference is strictly below the tolerance.

    """
    difference = value1 - value2
    return fabs(difference) < tol
def allclose(l1, l2, tol=1e-05):
    """Check element-wise (recursing into nested sequences) whether two
    lists are equal within a tolerance.

    Parameters
    ----------
    l1 : sequence[float]
        The first list of values.
    l2 : sequence[float]
        The second list of values.
    tol : float, optional
        The tolerance for comparing values.

    Returns
    -------
    bool
        True if every corresponding pair differs by at most ``tol``.

    """
    for a, b in zip(l1, l2):
        if hasattr(a, '__iter__'):
            # Nested sequence: compare its elements recursively.
            if not allclose(a, b, tol):
                return False
        elif fabs(a - b) > tol:
            return False
    return True
def argmax(values):
    """Index of the first maximum value in a sequence.

    Parameters
    ----------
    values : sequence[float]
        A list of values.

    Returns
    -------
    int
        The index of the first occurrence of the maximum.

    Raises
    ------
    ValueError
        If the sequence is empty.

    """
    # max over (index, value) pairs keyed on the value returns the first
    # pair carrying the maximal value.
    return max(enumerate(values), key=lambda pair: pair[1])[0]
def argmin(values):
    """Index of the first minimum value in a sequence.

    Parameters
    ----------
    values : sequence[float]
        A list of values.

    Returns
    -------
    int
        The index of the first occurrence of the minimum.

    Raises
    ------
    ValueError
        If the sequence is empty.

    """
    # min over (index, value) pairs keyed on the value returns the first
    # pair carrying the minimal value.
    return min(enumerate(values), key=lambda pair: pair[1])[0]
# ==============================================================================
# these return something of smaller dimension/length/...
# something_(of)vector/s
# ==============================================================================
def sum_vectors(vectors, axis=0):
    """Sum a series of vectors along the specified axis.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.
    axis : int, optional
        With ``axis == 0`` the sum is taken per column; otherwise per row.

    Returns
    -------
    list[float]
        Per-column sums (length ``len(vectors[0])``) for ``axis == 0``,
        per-row sums (length ``len(vectors)``) otherwise.

    """
    rows = zip(*vectors) if axis == 0 else vectors
    return [sum(row) for row in rows]
def norm_vector(vector):
    """L2 norm (length) of a vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.

    Returns
    -------
    float
        The L2 norm of the vector.

    """
    total = 0.0
    for component in vector:
        total += component ** 2
    return sqrt(total)
def norm_vectors(vectors):
    """L2 norm of each vector in a list of vectors.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.

    Returns
    -------
    list[float]
        The norms of all vectors.

    """
    return [sqrt(sum(component ** 2 for component in vector)) for vector in vectors]
def length_vector(vector):
    """Length of a 3D vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.

    Returns
    -------
    float
        The length of the vector.

    """
    return sqrt(vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2)
def length_vector_xy(vector):
    """Length of a vector projected onto the XY plane.

    Parameters
    ----------
    vector : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the vector; any Z component is ignored.

    Returns
    -------
    float
        The length of the XY component of the vector.

    """
    return sqrt(vector[0] ** 2 + vector[1] ** 2)
def length_vector_sqrd(vector):
    """Squared length of a 3D vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.

    Returns
    -------
    float
        The squared length.

    """
    x2 = vector[0] ** 2
    y2 = vector[1] ** 2
    z2 = vector[2] ** 2
    return x2 + y2 + z2
def length_vector_sqrd_xy(vector):
    """Squared length of a vector projected onto the XY plane.

    Parameters
    ----------
    vector : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the vector; any Z component is ignored.

    Returns
    -------
    float
        The squared length.

    """
    x, y = vector[0], vector[1]
    return x ** 2 + y ** 2
# ==============================================================================
# these perform an operation on a vector and return a modified vector
# -> elementwise operations on 1 vector
# should this not bet ...ed_vector
# ... or else modify the vector in-place
# ==============================================================================
def scale_vector(vector, factor):
    """Scale a vector by a given factor.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.
    factor : float
        The scaling factor.

    Returns
    -------
    [float, float, float]
        The scaled vector (a new list).

    """
    return [component * factor for component in vector]
def scale_vector_xy(vector, factor):
    """Scale the XY components of a vector; the result lies in the XY plane.

    Parameters
    ----------
    vector : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the vector; any Z component is discarded.
    factor : float
        Scale factor.

    Returns
    -------
    [float, float, 0.0]
        The scaled vector in the XY-plane.

    """
    x, y = vector[0], vector[1]
    return [x * factor, y * factor, 0.0]
def scale_vectors(vectors, factor):
    """Scale each vector in a list by a given factor.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.
    factor : float
        The scaling factor.

    Returns
    -------
    list[[float, float, float]]
        The scaled vectors.

    """
    return [[component * factor for component in vector] for vector in vectors]
def scale_vectors_xy(vectors, factor):
    """Scale the XY components of each vector in a list.

    Parameters
    ----------
    vectors : sequence[[float, float] or [float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors; any Z components are discarded.
    factor : float
        The scaling factor.

    Returns
    -------
    list[[float, float, 0.0]]
        The scaled vectors in the XY plane.

    """
    return [[vector[0] * factor, vector[1] * factor, 0.0] for vector in vectors]
def normalize_vector(vector):
    """Unit vector in the direction of the given vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.

    Returns
    -------
    [float, float, float]
        The normalized vector, or the input itself when its length is zero.

    """
    L = sqrt(vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2)
    if not L:
        # Zero-length vectors cannot be normalized; return unchanged.
        return vector
    return [vector[0] / L, vector[1] / L, vector[2] / L]
def normalize_vector_xy(vector):
    """Unit vector of the XY projection of the given vector.

    Parameters
    ----------
    vector : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the vector; any Z component is discarded.

    Returns
    -------
    [float, float, 0.0]
        The normalized vector in the XY-plane, or the input itself when
        its XY length is zero.

    """
    L = sqrt(vector[0] ** 2 + vector[1] ** 2)
    if not L:
        # Zero XY length: nothing to normalize; return unchanged.
        return vector
    return [vector[0] / L, vector[1] / L, 0.0]
def normalize_vectors(vectors):
    """Normalize each vector in a list.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.

    Returns
    -------
    list[[float, float, float]]
        The normalized vectors (zero-length inputs are returned unchanged
        by ``normalize_vector``).

    """
    return [normalize_vector(v) for v in vectors]
def normalize_vectors_xy(vectors):
    """Normalize the XY projection of each vector in a list.

    Parameters
    ----------
    vectors : sequence[[float, float] or [float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.

    Returns
    -------
    list[[float, float, 0.0]]
        The normalized vectors in the XY plane (zero-XY-length inputs are
        returned unchanged by ``normalize_vector_xy``).

    """
    return [normalize_vector_xy(v) for v in vectors]
def power_vector(vector, power):
    """Raise each component of a vector to the given power.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.
    power : int or float
        The exponent.

    Returns
    -------
    [float, float, float]
        The component-wise powers.

    """
    return [component ** power for component in vector]
def power_vectors(vectors, power):
    """Raise each component of each vector in a list to the given power.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.
    power : int or float
        The exponent.

    Returns
    -------
    list[[float, float, float]]
        The component-wise powers of all vectors.

    """
    return [[component ** power for component in vector] for vector in vectors]
def square_vector(vector):
    """Square each component of a vector.

    Parameters
    ----------
    vector : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.

    Returns
    -------
    [float, float, float]
        The component-wise squares.

    """
    return [component ** 2 for component in vector]
def square_vectors(vectors):
    """Square each component of each vector in a list.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.

    Returns
    -------
    list[[float, float, float]]
        The component-wise squares of all vectors.

    """
    # Bug fix: the previous implementation called square_vectors(vector)
    # recursively on each element (instead of square_vector), so any
    # non-empty input hit infinite recursion.
    return [[component ** 2 for component in vector] for vector in vectors]
# ==============================================================================
# these perform an operation with corresponding elements of the (2) input vectors as operands
# and return a vector with the results
# -> elementwise operations on two vectors
# ==============================================================================
def add_vectors(u, v):
    """Component-wise sum of two vectors.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the second vector.

    Returns
    -------
    [float, float, float]
        The resulting vector.

    """
    return [ua + va for ua, va in zip(u, v)]
def add_vectors_xy(u, v):
    """Component-wise sum of the XY parts of two vectors.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the first vector; any Z component is discarded.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) components of the second vector; any Z component is discarded.

    Returns
    -------
    [float, float, 0.0]
        Resulting vector in the XY-plane.

    """
    x = u[0] + v[0]
    y = u[1] + v[1]
    return [x, y, 0.0]
def subtract_vectors(u, v):
    """Subtract one vector from another component-wise.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the second vector.

    Returns
    -------
    [float, float, float]
        The resulting vector ``u - v``.
    """
    return [ui - vi for ui, vi in zip(u, v)]
def subtract_vectors_xy(u, v):
    """Subtract one vector from another, assuming they lie in the XY plane.

    Only the first two components are used; the Z component of the result
    is always 0.0.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the first vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the second vector.

    Returns
    -------
    [float, float, 0.0]
        Resulting vector in the XY-plane.
    """
    dx = u[0] - v[0]
    dy = u[1] - v[1]
    return [dx, dy, 0.0]
def multiply_vectors(u, v):
    """Element-wise multiplication of two vectors.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        The XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        The XYZ components of the second vector.

    Returns
    -------
    [float, float, float]
        Resulting vector.
    """
    return [ui * vi for ui, vi in zip(u, v)]
def multiply_vectors_xy(u, v):
    """Element-wise multiplication of two vectors assumed to lie in the XY plane.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the first vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the second vector.

    Returns
    -------
    [float, float, 0.0]
        Resulting vector in the XY plane.
    """
    x = u[0] * v[0]
    y = u[1] * v[1]
    return [x, y, 0.0]
def divide_vectors(u, v):
    """Element-wise division of two vectors.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        The XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        The XYZ components of the second vector.

    Returns
    -------
    [float, float, float]
        Resulting vector.
    """
    return [num / den for num, den in zip(u, v)]
def divide_vectors_xy(u, v):
    """Element-wise division of two vectors assumed to lie in the XY plane.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the first vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        The XY(Z) components of the second vector.

    Returns
    -------
    [float, float, 0.0]
        Resulting vector in the XY plane.
    """
    x = u[0] / v[0]
    y = u[1] / v[1]
    return [x, y, 0.0]
# ==============================================================================
# ...
# ==============================================================================
def cross_vectors(u, v):
    r"""Compute the cross product of two vectors.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the second vector.

    Returns
    -------
    [float, float, float]
        The cross product of the two vectors.

    Notes
    -----
    The components of :math:`\mathbf{u} \times \mathbf{v}` are the minors
    of the matrix

    .. math::
        :nowrap:

        \begin{bmatrix}
        x & y & z \\
        u_{x} & u_{y} & u_{z} \\
        v_{x} & v_{y} & v_{z}
        \end{bmatrix}

    Examples
    --------
    >>> cross_vectors([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
    [0.0, 0.0, 1.0]
    """
    # Unpack by index so inputs longer than three entries still work.
    ux, uy, uz = u[0], u[1], u[2]
    vx, vy, vz = v[0], v[1], v[2]
    return [uy * vz - uz * vy,
            uz * vx - ux * vz,
            ux * vy - uy * vx]
def cross_vectors_xy(u, v):
    """Compute the cross product of two vectors, assuming they lie in the XY-plane.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) coordinates of the first vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) coordinates of the second vector.

    Returns
    -------
    [float, float, float]
        The cross product, perpendicular to the XY plane.

    Examples
    --------
    >>> cross_vectors_xy([1.0, 0.0], [0.0, 1.0])
    [0.0, 0.0, 1.0]
    """
    # Only the Z component can be non-zero for in-plane vectors.
    z = u[0] * v[1] - u[1] * v[0]
    return [0.0, 0.0, z]
def dot_vectors(u, v):
    """Compute the dot product of two vectors.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the first vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the second vector.

    Returns
    -------
    float
        The dot product of the two vectors.

    Examples
    --------
    >>> dot_vectors([1.0, 0, 0], [2.0, 0, 0])
    2.0
    """
    total = 0
    for ui, vi in zip(u, v):
        total += ui * vi
    return total
def dot_vectors_xy(u, v):
    """Compute the dot product of two vectors, assuming they lie in the XY-plane.

    Any Z component of the inputs is ignored.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) coordinates of the first vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XY(Z) coordinates of the second vector.

    Returns
    -------
    float
        The dot product of the XY components of the two vectors.

    Examples
    --------
    >>> dot_vectors_xy([1.0, 0], [2.0, 0])
    2.0
    """
    ax, ay = u[0], u[1]
    bx, by = v[0], v[1]
    return ax * bx + ay * by
def vector_component(u, v):
    """Compute the component of u in the direction of v.

    Parameters
    ----------
    u : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.
    v : [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the direction.

    Returns
    -------
    [float, float, float]
        The component of u in the direction of v.

    Notes
    -----
    This is the vector projection of ``u`` onto ``v``.

    Examples
    --------
    >>> vector_component([1.0, 2.0, 3.0], [1.0, 0.0, 0.0])
    [1.0, 0.0, 0.0]
    """
    denom = length_vector_sqrd(v)
    if not denom:
        # A zero-length direction has no meaningful projection.
        return [0, 0, 0]
    factor = dot_vectors(u, v) / denom
    return scale_vector(v, factor)
def vector_component_xy(u, v):
    """Compute the component of u in the direction of v, assuming they lie in the XY-plane.

    Parameters
    ----------
    u : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the vector.
    v : [float, float] or [float, float, float] | :class:`~compas.geometry.Vector`
        XYZ components of the direction.

    Returns
    -------
    [float, float, 0.0]
        The component of u in the XY plane, in the direction of v.

    Notes
    -----
    This is the in-plane vector projection of ``u`` onto ``v``.

    Examples
    --------
    >>> vector_component_xy([1, 2, 0], [1, 0, 0])
    [1.0, 0.0, 0.0]
    """
    denom = length_vector_sqrd_xy(v)
    if not denom:
        # A zero-length direction has no meaningful projection.
        return [0, 0, 0]
    factor = dot_vectors_xy(u, v) / denom
    return scale_vector_xy(v, factor)
# ==============================================================================
# these involve vectors interpreted as matrices (lists of lists)
# -> matrix multiplication
# ==============================================================================
def transpose_matrix(M):
    """Transpose a matrix.

    Parameters
    ----------
    M : list[list[float]] | :class:`~compas.geometry.Transformation`
        The matrix to be transposed.

    Returns
    -------
    list[list[float]]
        The transposed matrix.
    """
    # zip(*M) pairs up the i-th entries of every row, i.e. the columns.
    return [list(column) for column in zip(*M)]
def multiply_matrices(A, B):
    r"""Multiply a matrix with a matrix.

    Parameters
    ----------
    A : list[list[float]] | :class:`~compas.geometry.Transformation`
        The first matrix.
    B : list[list[float]] | :class:`~compas.geometry.Transformation`
        The second matrix.

    Returns
    -------
    list[list[float]]
        The result matrix.

    Raises
    ------
    Exception
        If the shapes of the matrices are not compatible.
        If the row length of B is inconsistent.

    Examples
    --------
    >>> A = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
    >>> B = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
    >>> multiply_matrices(A, B)
    [[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]]
    """
    A = list(A)
    B = list(B)
    rows_b = len(B)
    cols_b = len(B[0])
    # Every row of B must have the same length for the product to be defined.
    if any(len(row) != cols_b for row in B):
        raise Exception('Row length in matrix B is inconsistent.')
    # The number of columns of A must equal the number of rows of B.
    if any(len(row) != rows_b for row in A):
        raise Exception('Matrix shapes are not compatible.')
    columns = list(zip(*B))
    # Each result entry is the dot product of a row of A with a column of B.
    return [[sum(a * b for a, b in zip(row, col)) for col in columns] for row in A]
def multiply_matrix_vector(A, b):
    r"""Multiply a matrix with a vector.

    Parameters
    ----------
    A : list[list[float]] | :class:`~compas.geometry.Transformation`
        The matrix.
    b : [float, float, float] | :class:`~compas.geometry.Vector`
        The vector.

    Returns
    -------
    [float, float, float]
        The resulting vector.

    Raises
    ------
    Exception
        If not all rows of the matrix have the same length as the vector.

    Examples
    --------
    >>> matrix = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
    >>> vector = [1.0, 2.0, 3.0]
    >>> multiply_matrix_vector(matrix, vector)
    [2.0, 4.0, 6.0]
    """
    n = len(b)
    if any(len(row) != n for row in A):
        raise Exception('Matrix shape is not compatible with vector length.')
    # Each output entry is the dot product of one matrix row with b.
    return [sum(a * x for a, x in zip(row, b)) for row in A]
# ==============================================================================
# linalg
# ==============================================================================
def homogenize_vectors(vectors, w=1.0):
    """Homogenise a list of vectors.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of vectors.
    w : float, optional
        Homogenisation parameter.

    Returns
    -------
    list[[float, float, float]]
        Homogenised vectors: each component divided by ``w``, with ``w``
        appended as fourth entry.

    Examples
    --------
    >>> vectors = [[1.0, 0.0, 0.0]]
    >>> homogenize_vectors(vectors)
    [[1.0, 0.0, 0.0, 1.0]]
    """
    result = []
    for x, y, z in vectors:
        result.append([x / w, y / w, z / w, w])
    return result
def dehomogenize_vectors(vectors):
    """Dehomogenise a list of vectors.

    Each 4-component vector ``[x, y, z, w]`` is mapped back to
    ``[x * w, y * w, z * w]``, reversing :func:`homogenize_vectors`.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        A list of homogenised (4-component) vectors.

    Returns
    -------
    list[float, float, float]
        Dehomogenised vectors.
    """
    result = []
    for x, y, z, w in vectors:
        result.append([x * w, y * w, z * w])
    return result
def orthonormalize_vectors(vectors):
    """Orthonormalize a set of vectors.

    Parameters
    ----------
    vectors : sequence[[float, float, float] | :class:`~compas.geometry.Vector`]
        The set of vectors to othonormalize.

    Returns
    -------
    list[[float, float, float]]
        An othonormal basis for the input vectors.

    Notes
    -----
    This creates a basis for the range (column space) of the matrix A.T,
    with A = vectors.
    Orthonormalisation is according to the Gram-Schmidt process.

    Examples
    --------
    >>> orthonormalize_vectors([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    """
    basis = []
    for v in vectors:
        if basis:
            # Remove the projections onto the basis vectors found so far.
            e = subtract_vectors(v, sum_vectors([vector_component(v, b) for b in basis]))
        else:
            e = v
        # Bug fix: compare absolute component sizes. The original test
        # `axis > 1e-10` wrongly discarded independent residuals whose
        # components are all negative (e.g. [-1.0, 0.0, 0.0]).
        if any(abs(axis) > 1e-10 for axis in e):
            basis.append(normalize_vector(e))
    return basis
| 23.634992 | 109 | 0.547064 |
acdffcc3c1d55b8ab456e8e20cd42cb65dea0939 | 83 | py | Python | private.py | v-jaswel/azspeech2txt | 700acdf9d60632be9103da5b3a6f5cc955494ded | [
"CC-BY-4.0",
"MIT"
] | 4 | 2019-06-21T01:52:10.000Z | 2021-12-16T05:55:23.000Z | private.py | v-jaswel/azspeech2txt | 700acdf9d60632be9103da5b3a6f5cc955494ded | [
"CC-BY-4.0",
"MIT"
] | null | null | null | private.py | v-jaswel/azspeech2txt | 700acdf9d60632be9103da5b3a6f5cc955494ded | [
"CC-BY-4.0",
"MIT"
] | 2 | 2020-07-21T19:49:31.000Z | 2021-01-02T15:04:17.000Z | subscription_key = "35854d387b5149e08cb703c5ddc58a20"
region = "southeastasia"
| 27.666667 | 53 | 0.807229 |
acdffed8eabb1a3daa4378735556a8d8d60fc3d7 | 2,650 | py | Python | components/tfx/StatisticsGen/component.py | kelvin169/pipelines | dce1cc98339a7a37260d7842319b2acc9d1cbe3f | [
"Apache-2.0"
] | 1 | 2019-12-27T12:49:42.000Z | 2019-12-27T12:49:42.000Z | components/tfx/StatisticsGen/component.py | kelvin169/pipelines | dce1cc98339a7a37260d7842319b2acc9d1cbe3f | [
"Apache-2.0"
] | 5 | 2021-03-02T01:30:32.000Z | 2022-02-26T03:15:15.000Z | components/tfx/StatisticsGen/component.py | kelvin169/pipelines | dce1cc98339a7a37260d7842319b2acc9d1cbe3f | [
"Apache-2.0"
] | 1 | 2020-05-09T17:13:25.000Z | 2020-05-09T17:13:25.000Z | from kfp.components import InputPath, OutputPath
def StatisticsGen(
    # Inputs
    input_data_path: InputPath('Examples'),
    #input_data_path: 'ExamplesPath',
    # Outputs
    output_path: OutputPath('ExampleStatistics'),
    #output_path: 'ExampleStatistics',
):
#) -> NamedTuple('Outputs', [
#    ('output', 'ExampleStatistics'),
#]):
    """Construct a StatisticsGen component.
    Args:
      input_data: A Channel of `Examples` type, likely generated by the
        [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
        This needs to contain two splits labeled `train` and `eval`. _required_
    #  examples: Forwards compatibility alias for the `input_data` argument.
    Returns:
      output: `ExampleStatistics` channel for statistics of each split
        provided in the input examples.
    """
    # Imports are local so this function can be packaged standalone as a
    # KFP container op (see the __main__ block below).
    import json
    import os
    from google.protobuf import json_format
    from tfx.types import standard_artifacts
    from tfx.types import channel_utils
    # Create input dict.
    input_base_path = input_data_path
    input_artifact_class = standard_artifacts.Examples
    # Recovering splits: assumes one sub-directory per split (e.g. train/eval)
    # under input_data_path — TODO confirm against the producing ExampleGen.
    splits = sorted(os.listdir(input_data_path))
    input_data_artifacts = []
    for split in splits:
        artifact = input_artifact_class()
        artifact.split = split
        artifact.uri = os.path.join(input_base_path, split) + '/'
        input_data_artifacts.append(artifact)
    input_data_channel = channel_utils.as_channel(input_data_artifacts)
    from tfx.components.statistics_gen.component import StatisticsGen
    component_class_instance = StatisticsGen(
        input_data=input_data_channel,
    )
    # Unpack the TFX channels into the raw artifact dicts the executor expects.
    input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
    output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
    exec_properties = component_class_instance.exec_properties
    # Generating paths for output artifacts
    for output_artifact in output_dict['output']:
        output_artifact.uri = os.path.join(output_path, output_artifact.split) # Default split is ''
    print('Component instance: ' + str(component_class_instance))
    # Run the executor directly (no orchestrator involved).
    executor = component_class_instance.executor_spec.executor_class()
    executor.Do(
        input_dict=input_dict,
        output_dict=output_dict,
        exec_properties=exec_properties,
    )
    #return (output_path,)
if __name__ == '__main__':
    # Build the KFP component spec (component.yaml) from the function above,
    # using the TFX image as the execution environment.
    import kfp
    kfp.components.func_to_container_op(
        StatisticsGen,
        base_image='tensorflow/tfx:0.15.0',
        output_component_file='component.yaml'
    )
| 33.974359 | 111 | 0.713962 |
ace0025e59b903e2b8c3baf3525c283ff9840c44 | 12,440 | py | Python | bot/cogs/sqcs_plugin/lecture.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 4 | 2020-11-25T16:31:41.000Z | 2021-08-28T21:35:01.000Z | bot/cogs/sqcs_plugin/lecture.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 12 | 2020-12-21T09:42:13.000Z | 2021-05-16T06:17:49.000Z | bot/cogs/sqcs_plugin/lecture.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 2 | 2021-04-13T08:28:12.000Z | 2021-07-11T02:41:35.000Z | from discord.ext import commands, tasks
import asyncio
import random
from ...core import sqcs_module as sm
from ...core.db.jsonstorage import JsonApi
from ...core.db.mongodb import Mongo
from ...core.utils import Time, DiscordExt
from ...core.cog_config import CogExtension
from ...core.fluctlight_ext import Fluct
import discord
import statistics
from cn2an import an2cn
class LectureConfig(CogExtension):
    # Cog with admin-only commands for registering/listing/removing
    # lecture configuration documents in the LectureSetting collection.
    @commands.group()
    @commands.has_any_role('總召', 'Administrator')
    async def lect_config(self, ctx):
        # Command-group container; the sub-commands below do the work.
        pass
    @lect_config.command()
    async def list(self, ctx):
        """cmd
        列出所有有註冊的講座。
        """
        lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        data = lect_set_cursor.find({})
        if data.count() == 0:
            return await ctx.send(':exclamation: 沒有講座資料!')
        # improved code
        # One text blob per lecture document, joined into a single message.
        lecture_list = '\n'.join(map(
            lambda item: f'name: {item["name"]}\n'
                         f'week: {item["week"]}\n'
                         f'status: {item["status"]}\n'
                         f'population: {item["population"]}\n',
            data
        ))
        await ctx.send(lecture_list)
        await ctx.send(':white_check_mark: 紀錄尋找完畢!')
    @lect_config.command()
    async def add(self, ctx):
        """cmd
        註冊講座資料。
        """
        # ask for arguments
        # Interactive Q&A: only accept replies from the invoking user in
        # the same channel; each question times out after 30 seconds.
        def check(message):
            return message.channel == ctx.channel and message.author == ctx.author
        try:
            await ctx.send(':question: 請問講座名稱是什麼呢?')
            name = (await self.bot.wait_for('message', check=check, timeout=30)).content
            await ctx.send(':question: 請問在星期幾舉辦呢?')
            week = (await self.bot.wait_for('message', check=check, timeout=30)).content
            await ctx.send(':question: 請問在當天甚麼時候開始呢?')
            start_time = (await self.bot.wait_for('message', check=check, timeout=30)).content
        except asyncio.TimeoutError:
            # Silently abort the whole registration on timeout.
            return
        # left _id for random
        lecture_config = {
            "name": name,
            "week": int(week),
            "status": False,
            "population": []
        }
        lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        lect_set_cursor.insert_one(lecture_config)
        # Hard-coded id of the lecture category channel on this guild.
        lect_category_channel = ctx.guild.get_channel(743517006040662127)
        lecture_text_channel = await ctx.guild.create_text_channel(
            name=name,
            category=lect_category_channel,
            topic=f'講座在星期{an2cn(week)}的 {start_time},歡迎參加!'
        )
        await lecture_text_channel.send(
            f':white_check_mark: 本頻道為 講座 - {name} 的專用頻道\n'
            f'自動生成時間:{Time.get_info("whole")}'
        )
        await ctx.guild.create_voice_channel(
            name=name,
            category=lect_category_channel
        )
        await ctx.send(':white_check_mark: 講座資料 與 專屬頻道 已建置完畢,謝謝你的配合!')
    @lect_config.command()
    async def remove(self, ctx, del_lect_week: int):
        """cmd
        刪除講座資料。
        """
        # NOTE(review): this removes only the DB document; the dedicated
        # text/voice channels created by `add` are left behind — confirm
        # whether that is intentional.
        lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        try:
            lect_set_cursor.delete_one({"week": del_lect_week})
            await ctx.send(f':white_check_mark: 星期 `{del_lect_week}` 的講座資料已被移除!')
        except Exception as e:
            await ctx.send(f':x: 移除星期 `{del_lect_week}` 的講座資料時發生了錯誤!')
            await ctx.send(content=e, delete_after=5.0)
class Lecture(CogExtension):
    """Lecture lifecycle commands: start a lecture, grant answer points,
    and end it with a score ranking.

    All sub-commands are restricted to the '總召' / 'Administrator' roles
    through the ``lect`` group decorator.
    """
    @commands.group()
    @commands.has_any_role('總召', 'Administrator')
    async def lect(self, ctx):
        # Command-group container; the sub-commands below do the work.
        pass

    @lect.command()
    async def start(self, ctx, week: int):
        """cmd
        開始講座。
        .week: 星期數
        """
        lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        lect_config = lect_set_cursor.find_one({"week": week})
        # Bug fix: validate the config BEFORE dereferencing it. The
        # original looked up lect_config['name'] first, which raised
        # TypeError whenever no lecture exists for the given week.
        if not lect_config:
            return await ctx.send(f':x: 星期 `{week}` 沒有講座!')
        if lect_config["status"]:
            return await ctx.send(':x: 講座已經開始了!')
        text_channel = discord.utils.get(ctx.guild.text_channels, name=lect_config['name'])
        voice_channel = discord.utils.get(ctx.guild.voice_channels, name=lect_config['name'])
        msg = await JsonApi.get_humanity('lecture/start/pt_1', '\n')
        msg += f'星期 `{week}` 的講座-`{lect_config["name"]}` 開始了呦 \\^~^\n'
        msg += await JsonApi.get_humanity('lecture/start/pt_2')
        await text_channel.send(msg)
        # Reset the population log and flag the lecture as running.
        execute = {
            "$set": {
                "population": [],
                "status": True
            }
        }
        lect_set_cursor.update({"week": week}, execute)
        # Join the voice channel and play the opening jingle.
        voice_client = await voice_channel.connect()
        audio_source = discord.FFmpegPCMAudio('./bot/assets/audio/lecture_starts.mp3')
        voice_client.play(audio_source)
        while voice_client.is_playing():
            await asyncio.sleep(1)
        voice_client.stop()
        await voice_client.disconnect()
        # Delete previous special messages (those starting with '&').
        msg_logs = await text_channel.history(limit=200).flatten()
        for msg in msg_logs:
            if msg.content and msg.content.startswith('&'):
                await msg.delete()
        # Random cool-down to exclude members who leave at once.
        await asyncio.sleep(random.randint(30, 180))
        attendants = [member.id for member in voice_channel.members]
        await sm.report_lect_attend(self.bot, attendants, week)
        # continue fetching population statistics, waiting for display using dash and flask integration

    # origin: lecture ans check
    @lect.command()
    async def add_point(self, ctx, delta_value: float, members_id: commands.Greedy[int]):
        # Grant `delta_value` score to each listed member and track their
        # per-lecture answer statistics in the LectureOngoing collection.
        lect_ongoing_cursor = Mongo('sqcs-bot').get_cur('LectureOngoing')
        fluct_ext = Fluct(score_mode='custom')
        for member_id in members_id:
            final_delta_score = await fluct_ext.add_score(member_id, delta_value)
            await fluct_ext.active_log_update(member_id)
            member_lecture_statistics = lect_ongoing_cursor.find_one({"_id": member_id})
            if not member_lecture_statistics:
                # First answer from this member: create their record.
                member_info = {
                    "_id": member_id,
                    "score": final_delta_score,
                    "count": 1
                }
                lect_ongoing_cursor.insert_one(member_info)
            else:
                execute = {
                    "$inc": {
                        "score": final_delta_score,
                        "count": 1
                    }
                }
                lect_ongoing_cursor.update_one({"_id": member_id}, execute)
        await ctx.send(':white_check_mark: 指令執行完畢!')

    @lect.command()
    async def end(self, ctx, week: int):
        """cmd
        結束講座。
        .week: 星期數
        """
        lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        lect_config = lect_set_cursor.find_one({"week": week})
        # Robustness: bail out early when the week has no lecture at all.
        if not lect_config:
            return await ctx.send(f':x: 星期 `{week}` 沒有講座!')
        # Bug fix: the original tested `lect_set_cursor["status"]`, which
        # indexes the pymongo Collection object (yielding a sub-collection)
        # instead of reading the lecture document's status flag.
        if not lect_config["status"]:
            return await ctx.send(':exclamation: 講座已經結束了!')
        text_channel = discord.utils.get(ctx.guild.text_channels, name=lect_config['name'])
        voice_channel = discord.utils.get(ctx.guild.voice_channels, name=lect_config['name'])
        msg = await JsonApi.get_humanity('lecture/end/main', '\n')
        population_list = [pop['count'] for pop in lect_config["population"]]
        # Guard: statistics.mean raises StatisticsError on an empty log.
        average_population = statistics.mean(population_list) if population_list else 0
        population_level = int(round(average_population / 10))
        msg += await JsonApi.get_humanity(f'lecture/end/reactions/{population_level}')
        await text_channel.send(msg)
        execute = {
            "$set": {
                "status": False
            }
        }
        lect_set_cursor.update_one({"week": week}, execute)
        # Join the voice channel and play the closing jingle.
        voice_client = await voice_channel.connect()
        audio_source = discord.FFmpegPCMAudio('./bot/assets/audio/lecture_ends.mp3')
        voice_client.play(audio_source)
        while voice_client.is_playing():
            await asyncio.sleep(1)
        voice_client.stop()
        await voice_client.disconnect()
        # Show the final ranking of members who answered questions.
        lect_ongoing_cursor = Mongo('sqcs-bot').get_cur('LectureOngoing')
        answered_member_list = lect_ongoing_cursor.find({}).sort("score", -1)
        if answered_member_list.count() == 0:
            return await ctx.send(':exclamation: There are no data to show!')
        ranking_medal_prefix = {
            0: ':first_place:',
            1: ':second_place:',
            2: ':third_place:'
        }
        member_rank_list = ''
        for rank, member in enumerate(answered_member_list):
            medal = ranking_medal_prefix.get(rank, ':medal:')
            member_name = await DiscordExt.get_member_nick_name(ctx.guild, member["_id"])
            member_rank_list += (
                f'{medal}{member_name} | '
                f'Score: {member["score"]}, '
                f'Answer Count: {member["count"]}\n'
            )
        embed_para = [
            ':scroll: Lecture Event Result',
            'default',
            0x42fcff,
            ['Lecture final info'],
            [member_rank_list]
        ]
        await text_channel.send(embed=await DiscordExt.create_embed(*embed_para))
        lect_ongoing_cursor.delete_many({})
        # Kick every member from the voice channel after a visible countdown.
        countdown_duration = 60

        def content(s):
            return f':exclamation: 所有成員將在 {s} 秒後被移出 {voice_channel.name}'

        message = await ctx.send(content(countdown_duration))
        while countdown_duration:
            await message.edit(content=content(countdown_duration))
            await asyncio.sleep(1)
            countdown_duration -= 1
        await message.delete()
        for member in voice_channel.members:
            await member.move_to(None)
class LectureAttendVerify(CogExtension):
    # Cog handling DM-based attendance verification via one-time tokens
    # stored in the Verification collection.
    @commands.group()
    async def lect_verify(self, ctx):
        # Command-group container; `attend` below does the work.
        pass
    @lect_verify.command()
    @commands.dm_only()
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def attend(self, ctx, token: str):
        """cmd
        尚未啟用。
        """
        verify_cursor = Mongo('sqcs-bot').get_cur('Verification')
        data = verify_cursor.find_one({"TOKEN": token, "reason": "lect"})
        if not data:
            return await ctx.send(
                ':x: 講座資料庫中不存在這個token\n'
                '請在15秒後重試或聯絡總召'
            )
        # fetching score parameters
        fluct_ext = Fluct(member_id=ctx.author.id, score_mode='lect_attend')
        try:
            await fluct_ext.add_score()
            await fluct_ext.active_log_update()
            await fluct_ext.lect_attend_update()
            # Token is single-use: consume it only after all updates succeed.
            verify_cursor.delete_one({"TOKEN": token, "reason": "lect"})
            await ctx.send(':white_check_mark: 操作成功!')
        except BaseException:
            # NOTE(review): catching BaseException also swallows
            # KeyboardInterrupt/CancelledError — consider narrowing.
            # Hard-coded guild id of the staff server for error reporting.
            guild = self.bot.get_guild(784607509629239316)
            report_channel = discord.utils.get(guild.text_channels, name='sqcs-lecture-attend')
            await report_channel.send(
                f'[DB MANI ERROR][to: {ctx.author.id}][inc_score_mode: lecture_attend]'
            )
            await ctx.send(':x: 操作失敗,請聯繫總召><')
class LectureAuto(CogExtension):
    # Background cog: samples the voice-channel population of the
    # currently running lecture every 2 minutes.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lect_set_cursor = Mongo('sqcs-bot').get_cur('LectureSetting')
        # Start the periodic task as soon as the cog is constructed.
        self.lect_population_log.start()
    @tasks.loop(minutes=2)
    async def lect_population_log(self):
        await self.bot.wait_until_ready()
        # Only one lecture can be flagged as running at a time —
        # presumably enforced by Lecture.start; TODO confirm.
        ongoing_lect = self.lect_set_cursor.find_one({"status": True})
        if not ongoing_lect:
            return
        # Hard-coded id of the community guild.
        guild = self.bot.get_guild(743507979369709639)
        voice_channel = discord.utils.get(guild.voice_channels, name=ongoing_lect['name'])
        population = len(voice_channel.members)
        if population:
            # Append one {count, time_stamp} sample to the lecture document.
            execute = {
                "$push": {
                    "population": {
                        "count": population,
                        "time_stamp": Time.get_info('custom', "%Y-%m-%d %H:%M")
                    }
                }
            }
            self.lect_set_cursor.update_one({"week": ongoing_lect['week']}, execute)
def setup(bot):
    # discord.py extension entry point: register the lecture cogs.
    # NOTE(review): LectureAuto is defined above but never added here,
    # so the population logger never runs — confirm whether it is
    # intentionally disabled.
    bot.add_cog(LectureConfig(bot))
    bot.add_cog(Lecture(bot))
    bot.add_cog(LectureAttendVerify(bot))
ace003969793dcfc4df02bb43dbe2b0415df2f8a | 4,317 | py | Python | benchmark/startQiskit_Class1338.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class1338.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class1338.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Z_f for a boolean function f on n qubits.

    For every n-bit input whose f-value is "1", the basis state is given
    a -1 phase by conjugating a multi-controlled Z with X gates on the
    0-bits of that input.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    # Enumerate all 2^n classical inputs and mark those mapped to "1".
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip the qubits that are 0 in this input so the controlled-Z
            # fires exactly on |rep>.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # Multi-controlled phase of pi == multi-controlled Z.
                oracle.mcu1(pi, controls[1:], controls[0])
            # Undo the X flips to restore the computational basis.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Grover-style search circuit for oracle function f on n qubits.

    The gate sequence (with `# number=` tags) is auto-generated; the core
    structure is: state preparation (Hadamards plus extra gates), `repeat`
    applications of the Zf oracle, and a diffusion-like block built from
    H/X/mcu1 gates.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial superposition plus generated extra gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[1]) # number=26
    prog.cz(input_qubit[4],input_qubit[1]) # number=27
    prog.h(input_qubit[1]) # number=28
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.h(input_qubit[1]) # number=34
    prog.cz(input_qubit[4],input_qubit[1]) # number=35
    prog.z(input_qubit[4]) # number=46
    prog.rx(0.8011061266653969,input_qubit[2]) # number=37
    prog.h(input_qubit[1]) # number=36
    Zf = build_oracle(n, f)
    # Standard Grover iteration count: floor(sqrt(2^n) * pi / 4).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[1],input_qubit[0]) # number=38
        prog.cx(input_qubit[1],input_qubit[0]) # number=47
        prog.x(input_qubit[0]) # number=48
        prog.cx(input_qubit[1],input_qubit[0]) # number=49
        prog.cx(input_qubit[1],input_qubit[0]) # number=40
        prog.cx(input_qubit[0],input_qubit[1]) # number=42
        prog.x(input_qubit[1]) # number=43
        prog.cx(input_qubit[0],input_qubit[1]) # number=44
        prog.x(input_qubit[2]) # number=11
        prog.y(input_qubit[1]) # number=45
        prog.x(input_qubit[3]) # number=12
        prog.h(input_qubit[2]) # number=41
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=22
        prog.x(input_qubit[0]) # number=23
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.cx(input_qubit[0],input_qubit[1]) # number=30
        prog.x(input_qubit[1]) # number=31
        prog.cx(input_qubit[0],input_qubit[1]) # number=32
        prog.x(input_qubit[2]) # number=15
        prog.h(input_qubit[4]) # number=29
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    # circuit end
    return prog
if __name__ == '__main__':
    # Search for the all-zeros bitstring: f(rep) == "1" only for `key`.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to measurement probabilities per basis state.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mock device and dump results plus circuit stats.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class1338.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
ace003f26c8952f23c68bafc9a8665f16600bfd9 | 3,406 | py | Python | day 14/Martijn - Python/extended_polymerization.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | 1 | 2021-12-02T12:09:11.000Z | 2021-12-02T12:09:11.000Z | day 14/Martijn - Python/extended_polymerization.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | null | null | null | day 14/Martijn - Python/extended_polymerization.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | 1 | 2021-12-01T21:14:41.000Z | 2021-12-01T21:14:41.000Z | import time
import math
def parse_rules(rules):
    """Parse insertion-rule lines of the form 'AB -> C' into {'AB': 'C'}."""
    parsed = {}
    for line in rules:
        parts = line.strip().split('->')
        parsed[parts[0].strip()] = parts[1].strip()
    return parsed
def parse_polymer(polymer):
    """Count each adjacent pair of elements in the polymer sequence."""
    pairs = {}
    for i in range(len(polymer) - 1):
        key = polymer[i] + polymer[i + 1]
        pairs[key] = pairs.get(key, 0) + 1
    return pairs
def find_element_at_index(index, polymer_dict):
    """Return the element whose index list contains `index`, or -1 if none does."""
    for element, indices in polymer_dict.items():
        if index in indices:
            return element
    return -1
def insert_indeces_element(element, indices, insert_element, inserst_index ):
    """Shift every index at or after the insertion point by one; when the
    inserted element matches *element*, record the insertion index too."""
    shifted = [i + 1 if i >= inserst_index else i for i in indices]
    if insert_element == element:
        shifted.append(inserst_index)
    return shifted
def insert_elements(polymer_dict, insert_dict):
    """Apply each insertion in *insert_dict* (index -> element) to the
    per-element index lists in *polymer_dict*, in place, and return it."""
    for ins_index, ins_element in insert_dict.items():
        # shift every stored index past the insertion point(inlined helper)
        for element in polymer_dict:
            updated = [i + 1 if i >= ins_index else i for i in polymer_dict[element]]
            if ins_element == element:
                updated.append(ins_index)
            polymer_dict[element] = updated
        if ins_element not in polymer_dict:
            polymer_dict[ins_element] = [ins_index]
    return polymer_dict
def apply_rules(pair_dict, rule_dict):
    """One polymerization step on pair counts: each pair AB with rule AB->C
    turns into the two child pairs AC and CB, carrying AB's count."""
    next_pairs = {}
    for pair, count in pair_dict.items():
        inserted = rule_dict[pair]
        for child in (pair[0] + inserted, inserted + pair[1]):
            next_pairs[child] = next_pairs.get(child, 0) + count
    return next_pairs
def calculate_diff_most_least_common_element(pair_dict, first, last):
    """Print and return the difference between the most and least common
    element counts implied by *pair_dict*.

    Every element is counted twice by its pairs except the polymer's first
    and last elements — hence the //2 plus the +1 correction for *first*
    and *last*.

    Args:
        pair_dict: pair -> occurrence count, as produced by parse_polymer/apply_rules.
        first: first element of the original polymer.
        last: last element of the original polymer.

    Returns:
        int: max element count minus min element count (also printed, matching
        the original behavior); 0 for an empty pair_dict (previously -inf).
    """
    count_element_dict = {}
    for pair, pair_count in pair_dict.items():
        for element in pair:
            count_element_dict[element] = count_element_dict.get(element, 0) + pair_count
    counts = []
    for element, doubled in count_element_dict.items():
        count = doubled // 2
        if element == first or element == last:
            count += 1  # endpoints appear in one fewer pair than their true count
        counts.append(count)
    # guard the degenerate case instead of printing inf - inf
    result = (max(counts) - min(counts)) if counts else 0
    print(result)
    return result
if __name__ == '__main__':
    # NOTE(review): 'input' shadows the builtin of the same name
    input = open('./day 14/Martijn - Python/input.txt').readlines()
    start_polymer = [c for c in input[0].strip()]
    # rule lines are exactly those containing one '->'
    rules = [rule for rule in input if len(rule.split('->')) == 2]
    rule_dict = parse_rules(rules)
    pair_dict = parse_polymer(start_polymer)
    tic = time.perf_counter()
    # 40 insertion steps on pair counts(the raw string would grow exponentially)
    for step in range(0, 40):
        print(step)
        pair_dict = apply_rules(pair_dict, rule_dict)
    toc = time.perf_counter()
    print(f"Apply step in {toc - tic:0.4f} seconds")
    # the first/last elements correct the pair double-counting at the endpoints
    calculate_diff_most_least_common_element(pair_dict, start_polymer[0], start_polymer[-1])
| 31.831776 | 103 | 0.609513 |
ace004915138e9229cdcd6fb30a7dcc30fda22f5 | 1,165 | py | Python | examples/example5.py | evanleeturner/easymodeler | a710433f2a3f5ef31b598e46c5681300cb2e97f1 | [
"BSD-3-Clause"
] | null | null | null | examples/example5.py | evanleeturner/easymodeler | a710433f2a3f5ef31b598e46c5681300cb2e97f1 | [
"BSD-3-Clause"
] | 4 | 2017-07-15T17:03:20.000Z | 2019-03-13T17:24:43.000Z | examples/example5.py | evanleeturner/easymodeler | a710433f2a3f5ef31b598e46c5681300cb2e97f1 | [
"BSD-3-Clause"
] | null | null | null | import emlib
import logging
"""
Example 5. Lotka Volterra Calibration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
emlib.emlog.setLevel(logging.INFO)
def LV3_int(t,initial,dtinput,constants):
    """Lotka-Volterra derivative function for emlib integration.

    Returns [dx/dt, dy/dt] for prey x = initial[0] and predator y = initial[1],
    with rates A..D read from the calibration *constants* object.
    """
    prey, predator = initial[0], initial[1]
    a = constants.Val("A")
    b = constants.Val("B")
    c = constants.Val("C")
    d = constants.Val("D")
    prey_rate = (a * prey) - (b * prey * predator)
    predator_rate = (d * prey * predator) - (c * predator)
    return [prey_rate, predator_rate]
# observed hare counts: the calibration target
hares = emlib.Observation("Hares",filename="LVdata.csv")
LVmodel = emlib.Model(LV3_int)
# calibration parameters with initial guesses and search bounds
LVtest = emlib.Calibration()
LVtest.Add("A",val=.3,min=.01,max=.7)
LVtest.Add("B",val=.04,min=.01,max=.07)
LVtest.Add("C",val=.6,min=.5,max=1.0)
LVtest.Add("D",val=.04,min=.01,max=.05)
# initial populations [prey, predator]
LVtest.initial = [30.0,4.0]
LVtime = emlib.TimeSeries(filename="LVinput.csv")
# baseline run with the initial guess, monthly time step(1/12 year)
LVmodel.Integrate(LVtest.initial,Calibration=LVtest,TimeSeries=LVtime,dt=(1.0/12.0))
LVmodel.Validate(hares,graph=True)
LVmodel.fit.Print()
# calibrate over 500 runs, then re-integrate with the best parameter set
best = LVmodel.Calibrate(LVtest,hares,runs=500,TimeSeries=LVtime,dt=(1.0/12.0))
best.Print()
LVmodel.Integrate(LVtest.initial,Calibration=best,TimeSeries=LVtime,dt=(1.0/12.0))
LVmodel.Validate(hares,graph=True)
LVmodel.fit.Print()
| 23.77551 | 84 | 0.656652 |
ace0055b07a1d19a0acf9019ad7f297826e35c75 | 5,088 | py | Python | voc2darknet.py | innovationgarage/PCparts | d021b73288fbb7ad4882716053a323a1a22c15b8 | [
"Apache-2.0"
] | null | null | null | voc2darknet.py | innovationgarage/PCparts | d021b73288fbb7ad4882716053a323a1a22c15b8 | [
"Apache-2.0"
] | null | null | null | voc2darknet.py | innovationgarage/PCparts | d021b73288fbb7ad4882716053a323a1a22c15b8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 15:40:43 2016
This script is to convert the txt annotation files to appropriate format needed by YOLO
@author: Martin Hwang
Email: dhhwang89@gmail.com
"""
import os
from os import walk, getcwd
from PIL import Image
import xml.etree.ElementTree as ET
import math
class color:
    """ANSI escape sequences used to colorize console output."""
    BOLD = '\033[1m'
    END = '\033[0m'  # reset all attributes
    DEFAULT = '\033[0;37;40m'
    RED = '\033[91m'
def convert(size, box):
    """Convert a VOC box (xmin, xmax, ymin, ymax), in pixels, into YOLO format
    (x_center, y_center, width, height) normalized by the image *size*
    (width, height); each value is rounded to 3 decimals."""
    inv_w = 1. / size[0]
    inv_h = 1. / size[1]
    x_center = ((box[0] + box[1]) / 2.0) * inv_w
    y_center = ((box[2] + box[3]) / 2.0) * inv_h
    box_w = (box[1] - box[0]) * inv_w
    box_h = (box[3] - box[2]) * inv_h
    return (round(x_center, 3), round(y_center, 3), round(box_w, 3), round(box_h, 3))
# Class list for this dataset(list index = YOLO class id)
# classes = ["F_Baby", "F_Kid", "F_10s", "F_20s", "F_30s", "F_40s", "F_50s", "F_Senior", "M_Baby", "M_Kid", "M_10s", "M_20s", "M_30s", "M_40s", "M_50s", "M_Senior"]
classes = ["ram"]
# Configure paths: VOC XML annotations in, YOLO label files out
annotation_path = "Annotations/"
yolo_label_path = "labels/"
list_file_name = "parts"
wd = getcwd()
# image-list file consumed by darknet training
list_file = open('%s/%s_list.txt' % (wd, list_file_name), 'w')
# Collect annotation filenames from the top level only(break after first walk step)
xml_name_list = []
for (dirpath, dirnames, filenames) in walk(annotation_path):
    xml_name_list.extend(filenames)
    break
print(color.BOLD + "xml file list : {}".format(xml_name_list) + color.END + '\n')
# Per-file conversion loop.  The output names are initialized up front so the
# error handler can clean up safely no matter where a failure occurs: in the
# original code they could be unbound(NameError) on an early failure, and
# os.remove() was passed the file object instead of the path(TypeError).
result_outpath = None
result_outfile = None
try:
    #Process
    for xml_name in xml_name_list:
        print('------------------------------------------------------------------------')
        # open and parse the xml file(closed after parsing to avoid a handle leak)
        xml_path = annotation_path + xml_name
        print("Input file : " + xml_path)
        with open(xml_path, "r") as xml_file:
            tree = ET.parse(xml_file)
        root = tree.getroot()
        size = root.find('size')
        if size is None:
            raise Exception("can't find size tag")
        xml_width = int(size.find('width').text)
        xml_height = int(size.find('height').text)
        # img_path = str('%s/voc2012/jpeg/%s.jpg' % (wd, os.path.splitext(xml_name)[0]))
        img_path = str('%s/Images/%s.JPG' % (wd, os.path.splitext(xml_name)[0]))
        objects = root.findall('object')
        if len(objects) == 0:
            # annotation without objects is useless — drop both xml and image
            print(color.BOLD + color.RED + "ERROR : can't find object tag"+ color.END)
            if os.path.exists(xml_path):
                os.remove(xml_path)
            if os.path.exists(img_path):
                os.remove(img_path)
            continue
        # open the image file to cross-check the annotated dimensions
        img = Image.open(img_path)
        img_width = int(img.size[0])
        img_height = int(img.size[1])
        print('Image path : ' + img_path + '\n')
        print("xml size (width, height) : " + "(" + str(xml_width) + ',' + str(xml_height) + ")")
        print('image size (width, height) : ' + "(" + str(img_width) + ',' + str(img_height) + ")\n")
        if not xml_width == img_width or not xml_height == img_height:
            print(color.BOLD + color.RED + "xml and image size different" + color.END)
            raise Exception("xml and image size different")
        # Open output result file(one YOLO .txt label per annotation)
        result_outpath = str(yolo_label_path + xml_name[:-3] + "txt")
        result_outfile = open(result_outpath, "w")
        print("Output:" + result_outpath + '\n')
        for obj in objects:  # renamed from 'object' to avoid shadowing the builtin
            cls = obj.find('name').text
            if cls is None:
                raise Exception("can't find name tag")
            elif cls not in classes:
                raise Exception("name tag not involve this classes")
            bndbox = obj.find('bndbox')
            if bndbox is None:
                if os.path.exists(xml_path):
                    os.remove(xml_path)
                if os.path.exists(img_path):
                    os.remove(img_path)
                raise Exception("can't find bndbox tag")
            xmin = int(bndbox.find('xmin').text)
            xmax = int(bndbox.find('xmax').text)
            ymin = int(bndbox.find('ymin').text)
            ymax = int(bndbox.find('ymax').text)
            b = (float(xmin), float(xmax), float(ymin), float(ymax))
            bb = convert((img_width, img_height),b)
            cls_id = classes.index(cls)
            print('class name, index : ' + '(' + str(cls) + ", " + str(cls_id) + ')')
            print("bndbox Size : " + str(b))
            print("convert result : " + str(bb) + '\n')
            result_outfile.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
        result_outfile.close()
        # list_file.writelines('%s/voc2007/jpeg/%s.jpg\n' % (wd, os.path.splitext(xml_name)[0]))
        list_file.writelines('%s/Images/%s.JPG\n' % (wd, os.path.splitext(xml_name)[0]))
    list_file.close()
except Exception as e:
    print(color.BOLD + color.RED + "ERROR : {}".format(e) + color.END)
    if result_outfile is not None and not result_outfile.closed:
        print(color.BOLD + color.RED + "Close result_outfile" + color.END)
        result_outfile.close()
    if result_outpath is not None and os.path.exists(result_outpath):
        print(color.BOLD + color.RED + "delete result outpath" + color.END)
        # bug fix: remove the path string, not the file object
        os.remove(result_outpath)
ace006eef7037dc8c08f5ff87485fa4b45b4aaf8 | 58,552 | py | Python | python/ccxt/delta.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 4 | 2021-09-24T09:18:36.000Z | 2022-03-15T16:47:09.000Z | python/ccxt/delta.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | null | null | null | python/ccxt/delta.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 2 | 2021-10-01T21:51:37.000Z | 2021-10-02T16:23:05.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class delta(Exchange):
    def describe(self):
        """Return the static exchange metadata(id, capability flags, endpoint
        map, fee schedule, error-code mapping) merged over the base Exchange
        description via deep_extend."""
        return self.deep_extend(super(delta, self).describe(), {
            'id': 'delta',
            'name': 'Delta Exchange',
            'countries': ['VC'],  # Saint Vincent and the Grenadines
            'rateLimit': 300,
            'version': 'v2',
            # unified capability flags(new metainfo interface)
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchDepositAddress': True,
                'fetchCurrencies': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchStatus': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': True,
                'fetchTrades': True,
            },
            # unified timeframe -> exchange resolution string
            'timeframes': {
                '1m': '1m',
                '3m': '3m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '2h': '2h',
                '4h': '4h',
                '6h': '6h',
                '1d': '1d',
                '7d': '7d',
                '1w': '1w',
                '2w': '2w',
                '1M': '30d',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/99450025-3be60a00-2931-11eb-9302-f4fd8d8589aa.jpg',
                'test': {
                    'public': 'https://testnet-api.delta.exchange',
                    'private': 'https://testnet-api.delta.exchange',
                },
                'api': {
                    'public': 'https://api.delta.exchange',
                    'private': 'https://api.delta.exchange',
                },
                'www': 'https://www.delta.exchange',
                'doc': [
                    'https://docs.delta.exchange',
                ],
                'fees': 'https://www.delta.exchange/fees',
                'referral': 'https://www.delta.exchange/app/signup/?code=IULYNB',
            },
            # REST endpoint map; method names like publicGetTickersSymbol are generated from it
            'api': {
                'public': {
                    'get': [
                        'assets',
                        'settings',
                        'indices',
                        'products',
                        'tickers',
                        'tickers/{symbol}',
                        'l2orderbook/{symbol}',
                        'trades/{symbol}',
                        'history/candles',
                        'history/sparklines',
                    ],
                },
                'private': {
                    'get': [
                        'orders',
                        'orders/leverage',
                        'positions',
                        'positions/margined',
                        'orders/history',
                        'fills',
                        'fills/history/download/csv',
                        'wallet/balances',
                        'wallet/transactions',
                        'wallet/transactions/download',
                        'deposits/address',
                    ],
                    'post': [
                        'orders',
                        'orders/batch',
                        'orders/leverage',
                        'positions/change_margin',
                    ],
                    'put': [
                        'orders',
                        'orders/batch',
                    ],
                    'delete': [
                        'orders',
                        'orders/all',
                        'orders/batch',
                    ],
                },
            },
            # volume-tiered maker/taker fees; tier thresholds per the exchange fee page
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'taker': 0.15 / 100,
                    'maker': 0.10 / 100,
                    'tiers': {
                        'taker': [
                            [0, 0.15 / 100],
                            [100, 0.13 / 100],
                            [250, 0.13 / 100],
                            [1000, 0.1 / 100],
                            [5000, 0.09 / 100],
                            [10000, 0.075 / 100],
                            [20000, 0.065 / 100],
                        ],
                        'maker': [
                            [0, 0.1 / 100],
                            [100, 0.1 / 100],
                            [250, 0.09 / 100],
                            [1000, 0.075 / 100],
                            [5000, 0.06 / 100],
                            [10000, 0.05 / 100],
                            [20000, 0.05 / 100],
                        ],
                    },
                },
            },
            'precisionMode': TICK_SIZE,
            'requiredCredentials': {
                'apiKey': True,
                'secret': False,
            },
            # exchange error codes -> unified ccxt exception classes
            'exceptions': {
                'exact': {
                    # Margin required to place order with selected leverage and quantity is insufficient.
                    'insufficient_margin': InsufficientFunds,  # {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
                    'order_size_exceed_available': InvalidOrder,  # The order book doesn't have sufficient liquidity, hence the order couldnt be filled, for example, ioc orders
                    'risk_limits_breached': BadRequest,  # orders couldn't be placed as it will breach allowed risk limits.
                    'invalid_contract': BadSymbol,  # The contract/product is either doesn't exist or has already expired.
                    'immediate_liquidation': InvalidOrder,  # Order will cause immediate liquidation.
                    'out_of_bankruptcy': InvalidOrder,  # Order prices are out of position bankruptcy limits.
                    'self_matching_disrupted_post_only': InvalidOrder,  # Self matching is not allowed during auction.
                    'immediate_execution_post_only': InvalidOrder,  # orders couldn't be placed as it includes post only orders which will be immediately executed
                    'bad_schema': BadRequest,  # {"error":{"code":"bad_schema","context":{"schema_errors":[{"code":"validation_error","message":"id is required","param":""}]}},"success":false}
                    'invalid_api_key': AuthenticationError,  # {"success":false,"error":{"code":"invalid_api_key"}}
                    'invalid_signature': AuthenticationError,  # {"success":false,"error":{"code":"invalid_signature"}}
                    'open_order_not_found': OrderNotFound,  # {"error":{"code":"open_order_not_found"},"success":false}
                    'unavailable': ExchangeNotAvailable,  # {"error":{"code":"unavailable"},"success":false}
                },
                'broad': {
                },
            },
        })
def fetch_time(self, params={}):
response = self.publicGetSettings(params)
#
# {
# "result":{
# "server_time":1605472733766141,
# "deto_referral_mining_daily_reward":"25000",
# "deto_total_reward_pool":"100000000",
# "deto_trade_mining_daily_reward":"75000",
# "kyc_deposit_limit":"20",
# "kyc_withdrawal_limit":"2",
# "under_maintenance":"false"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.safe_integer_product(result, 'server_time', 0.001)
def fetch_status(self, params={}):
response = self.publicGetSettings(params)
result = self.safe_value(response, 'result', {})
underMaintenance = self.safe_value(result, 'under_maintenance')
status = 'maintenance' if (underMaintenance == 'true') else 'ok'
updated = self.safe_integer_product(result, 'server_time', 0.001)
self.status = self.extend(self.status, {
'status': status,
'updated': updated,
})
return self.status
def fetch_currencies(self, params={}):
response = self.publicGetAssets(params)
#
# {
# "result":[
# {
# "base_withdrawal_fee":"0.0005",
# "deposit_status":"enabled",
# "id":2,
# "interest_credit":true,
# "interest_slabs":[
# {"limit":"0.1","rate":"0"},
# {"limit":"1","rate":"0.05"},
# {"limit":"5","rate":"0.075"},
# {"limit":"10","rate":"0.1"},
# {"limit":"9999999999999999","rate":"0"}
# ],
# "kyc_deposit_limit":"10",
# "kyc_withdrawal_limit":"2",
# "min_withdrawal_amount":"0.001",
# "minimum_precision":4,
# "name":"Bitcoin",
# "precision":8,
# "sort_priority":1,
# "symbol":"BTC",
# "variable_withdrawal_fee":"0",
# "withdrawal_status":"enabled"
# },
# ],
# "success":true
# }
#
currencies = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'id')
code = self.safe_currency_code(id)
depositStatus = self.safe_string(currency, 'deposit_status')
withdrawalStatus = self.safe_string(currency, 'withdrawal_status')
depositsEnabled = (depositStatus == 'enabled')
withdrawalsEnabled = (withdrawalStatus == 'enabled')
active = depositsEnabled and withdrawalsEnabled
precision = self.safe_integer(currency, 'precision')
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'name': self.safe_string(currency, 'name'),
'info': currency, # the original payload
'active': active,
'fee': self.safe_number(currency, 'base_withdrawal_fee'),
'precision': 1 / math.pow(10, precision),
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
def load_markets(self, reload=False, params={}):
markets = super(delta, self).load_markets(reload, params)
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
if (currenciesByNumericId is None) or reload:
self.options['currenciesByNumericId'] = self.index_by(self.currencies, 'numericId')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId')
if (marketsByNumericId is None) or reload:
self.options['marketsByNumericId'] = self.index_by(self.markets, 'numericId')
return markets
def fetch_markets(self, params={}):
response = self.publicGetProducts(params)
#
# {
# "meta":{
# "after":null,
# "before":null,
# "limit":100,
# "total_count":81
# },
# "result":[
# {
# "annualized_funding":"5.475000000000000000",
# "is_quanto":false,
# "ui_config":{
# "default_trading_view_candle":"15",
# "leverage_slider_values":[1,3,5,10,25,50],
# "price_clubbing_values":[0.001,0.005,0.05,0.1,0.5,1,5],
# "show_bracket_orders":false,
# "sort_priority":29,
# "tags":[]
# },
# "basis_factor_max_limit":"0.15",
# "symbol":"P-LINK-D-151120",
# "id":1584,
# "default_leverage":"5.000000000000000000",
# "maker_commission_rate":"0.0005",
# "contract_unit_currency":"LINK",
# "strike_price":"12.507948",
# "settling_asset":{
# # asset structure
# },
# "auction_start_time":null,
# "auction_finish_time":null,
# "settlement_time":"2020-11-15T12:00:00Z",
# "launch_time":"2020-11-14T11:55:05Z",
# "spot_index":{
# # index structure
# },
# "trading_status":"operational",
# "tick_size":"0.001",
# "position_size_limit":100000,
# "notional_type":"vanilla", # vanilla, inverse
# "price_band":"0.4",
# "barrier_price":null,
# "description":"Daily LINK PUT options quoted in USDT and settled in USDT",
# "insurance_fund_margin_contribution":"1",
# "quoting_asset":{
# # asset structure
# },
# "liquidation_penalty_factor":"0.2",
# "product_specs":{"max_volatility":3,"min_volatility":0.3,"spot_price_band":"0.40"},
# "initial_margin_scaling_factor":"0.0001",
# "underlying_asset":{
# # asset structure
# },
# "state":"live",
# "contract_value":"1",
# "initial_margin":"2",
# "impact_size":5000,
# "settlement_price":null,
# "contract_type":"put_options", # put_options, call_options, move_options, perpetual_futures, interest_rate_swaps, futures, spreads
# "taker_commission_rate":"0.0005",
# "maintenance_margin":"1",
# "short_description":"LINK Daily PUT Options",
# "maintenance_margin_scaling_factor":"0.00005",
# "funding_method":"mark_price",
# "max_leverage_notional":"20000"
# },
# ],
# "success":true
# }
#
markets = self.safe_value(response, 'result', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
type = self.safe_string(market, 'contract_type')
# settlingAsset = self.safe_value(market, 'settling_asset', {})
quotingAsset = self.safe_value(market, 'quoting_asset', {})
underlyingAsset = self.safe_value(market, 'underlying_asset', {})
baseId = self.safe_string(underlyingAsset, 'symbol')
quoteId = self.safe_string(quotingAsset, 'symbol')
id = self.safe_string(market, 'symbol')
numericId = self.safe_integer(market, 'id')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = id
swap = False
future = False
option = False
if type == 'perpetual_futures':
type = 'swap'
swap = True
future = False
option = False
if id.find('_') < 0:
symbol = base + '/' + quote
elif (type == 'call_options') or (type == 'put_options') or (type == 'move_options'):
type = 'option'
swap = False
option = True
future = False
elif type == 'futures':
type = 'future'
swap = False
option = False
future = True
precision = {
'amount': 1.0, # number of contracts
'price': self.safe_number(market, 'tick_size'),
}
limits = {
'amount': {
'min': 1.0,
'max': self.safe_number(market, 'position_size_limit'),
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_size'),
'max': None,
},
}
state = self.safe_string(market, 'state')
active = (state == 'live')
maker = self.safe_number(market, 'maker_commission_rate')
taker = self.safe_number(market, 'taker_commission_rate')
result.append({
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'type': type,
'option': option,
'swap': swap,
'future': future,
'maker': maker,
'taker': taker,
'precision': precision,
'limits': limits,
'info': market,
'active': active,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "close":15837.5,
# "high":16354,
# "low":15751.5,
# "mark_price":"15820.100867",
# "open":16140.5,
# "product_id":139,
# "size":640552,
# "spot_price":"15827.050000000001",
# "symbol":"BTCUSDT",
# "timestamp":1605373550208262,
# "turnover":10298630.3735,
# "turnover_symbol":"USDT",
# "turnover_usd":10298630.3735,
# "volume":640.5520000000001
# }
#
timestamp = self.safe_integer_product(ticker, 'timestamp', 0.001)
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'close')
open = self.safe_number(ticker, 'open')
change = None
average = None
percentage = None
if (open is not None) and (last is not None):
change = last - open
average = self.sum(last, open) / 2
if open != 0.0:
percentage = (change / open) * 100
baseVolume = self.safe_number(ticker, 'volume')
quoteVolume = self.safe_number(ticker, 'turnover')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTickersSymbol(self.extend(request, params))
#
# {
# "result":{
# "close":15837.5,
# "high":16354,
# "low":15751.5,
# "mark_price":"15820.100867",
# "open":16140.5,
# "product_id":139,
# "size":640552,
# "spot_price":"15827.050000000001",
# "symbol":"BTCUSDT",
# "timestamp":1605373550208262,
# "turnover":10298630.3735,
# "turnover_symbol":"USDT",
# "turnover_usd":10298630.3735,
# "volume":640.5520000000001
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ticker(result, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
#
# {
# "result":[
# {
# "close":0.003966,
# "high":0.004032,
# "low":0.003606,
# "mark_price":"0.00396328",
# "open":0.003996,
# "product_id":1327,
# "size":6242,
# "spot_price":"0.0039555",
# "symbol":"AAVEBTC",
# "timestamp":1605374143864107,
# "turnover":23.997904999999996,
# "turnover_symbol":"BTC",
# "turnover_usd":387957.4544782897,
# "volume":6242
# },
# ],
# "success":true
# }
#
tickers = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = self.publicGetL2orderbookSymbol(self.extend(request, params))
#
# {
# "result":{
# "buy":[
# {"price":"15814.0","size":912},
# {"price":"15813.5","size":1279},
# {"price":"15813.0","size":1634},
# ],
# "sell":[
# {"price":"15814.5","size":625},
# {"price":"15815.0","size":982},
# {"price":"15815.5","size":1328},
# ],
# "symbol":"BTCUSDT"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_order_book(result, symbol, None, 'buy', 'sell', 'price', 'size')
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "buyer_role":"maker",
# "price":"15896.5",
# "seller_role":"taker",
# "size":241,
# "symbol":"BTCUSDT",
# "timestamp":1605376684714595
# }
#
# private fetchMyTrades
#
# {
# "commission":"0.008335000000000000",
# "created_at":"2020-11-16T19:07:19Z",
# "fill_type":"normal",
# "id":"e7ff05c233a74245b72381f8dd91d1ce",
# "meta_data":{
# "effective_commission_rate":"0.0005",
# "order_price":"16249",
# "order_size":1,
# "order_type":"market_order",
# "order_unfilled_size":0,
# "trading_fee_credits_used":"0"
# },
# "order_id":"152999629",
# "price":"16669",
# "product":{
# "contract_type":"perpetual_futures",
# "contract_unit_currency":"BTC",
# "contract_value":"0.001",
# "id":139,
# "notional_type":"vanilla",
# "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "symbol":"BTCUSDT",
# "tick_size":"0.5",
# "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
# },
# "product_id":139,
# "role":"taker",
# "side":"sell",
# "size":1
# }
#
id = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'order_id')
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
timestamp = self.safe_integer_product(trade, 'timestamp', 0.001, timestamp)
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'size')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
product = self.safe_value(trade, 'product', {})
marketId = self.safe_string(product, 'symbol')
symbol = self.safe_symbol(marketId, market)
sellerRole = self.safe_string(trade, 'seller_role')
side = self.safe_string(trade, 'side')
if side is None:
if sellerRole == 'taker':
side = 'sell'
elif sellerRole == 'maker':
side = 'buy'
takerOrMaker = self.safe_string(trade, 'role')
metaData = self.safe_value(trade, 'meta_data', {})
type = self.safe_string(metaData, 'order_type')
if type is not None:
type = type.replace('_order', '')
feeCost = self.safe_number(trade, 'commission')
fee = None
if feeCost is not None:
settlingAsset = self.safe_value(product, 'settling_asset', {})
feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTradesSymbol(self.extend(request, params))
#
# {
# "result":[
# {
# "buyer_role":"maker",
# "price":"15896.5",
# "seller_role":"taker",
# "size":241,
# "symbol":"BTCUSDT",
# "timestamp":1605376684714595
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_trades(result, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":1605393120,
# "open":15989,
# "high":15989,
# "low":15987.5,
# "close":15987.5,
# "volume":565
# }
#
return [
self.safe_timestamp(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
limit = limit if limit else 2000 # max 2000
if since is None:
end = self.seconds()
request['end'] = end
request['start'] = end - limit * duration
else:
start = int(since / 1000)
request['start'] = start
request['end'] = self.sum(start, limit * duration)
response = self.publicGetHistoryCandles(self.extend(request, params))
#
# {
# "success":true,
# "result":[
# {"time":1605393120,"open":15989,"high":15989,"low":15987.5,"close":15987.5,"volume":565},
# {"time":1605393180,"open":15966,"high":15966,"low":15959,"close":15959,"volume":24},
# {"time":1605393300,"open":15973,"high":15973,"low":15973,"close":15973,"volume":1288},
# ]
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetWalletBalances(params)
#
# {
# "result":[
# {
# "asset_id":1,
# "available_balance":"0",
# "balance":"0",
# "commission":"0",
# "id":154883,
# "interest_credit":"0",
# "order_margin":"0",
# "pending_referral_bonus":"0",
# "pending_trading_fee_credit":"0",
# "position_margin":"0",
# "trading_fee_credit":"0",
# "user_id":22142
# },
# ],
# "success":true
# }
#
balances = self.safe_value(response, 'result', [])
result = {'info': response}
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId', {})
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset_id')
currency = self.safe_value(currenciesByNumericId, currencyId)
code = currencyId if (currency is None) else currency['code']
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['free'] = self.safe_string(balance, 'available_balance')
result[code] = account
return self.parse_balance(result, False)
def fetch_position(self, symbol, params=None):
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
}
response = self.privateGetPositions(self.extend(request, params))
#
# {
# "result":{
# "entry_price":null,
# "size":0,
# "timestamp":1605454074268079
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return result
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.privateGetPositionsMargined(params)
#
# {
# "success": True,
# "result": [
# {
# "user_id": 0,
# "size": 0,
# "entry_price": "string",
# "margin": "string",
# "liquidation_price": "string",
# "bankruptcy_price": "string",
# "adl_level": 0,
# "product_id": 0
# }
# ]
# }
#
result = self.safe_value(response, 'result', [])
return result
def parse_order_status(self, status):
statuses = {
'open': 'open',
'pending': 'open',
'closed': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, cancelOrder, editOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":null,
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"open",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# }
#
id = self.safe_string(order, 'id')
clientOrderId = self.safe_string(order, 'client_order_id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
marketId = self.safe_string(order, 'product_id')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId', {})
market = self.safe_value(marketsByNumericId, marketId, market)
symbol = marketId if (market is None) else market['symbol']
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'order_type')
type = type.replace('_order', '')
price = self.safe_number(order, 'limit_price')
amount = self.safe_number(order, 'size')
remaining = self.safe_number(order, 'unfilled_size')
average = self.safe_number(order, 'average_fill_price')
fee = None
feeCost = self.safe_number(order, 'paid_commission')
if feeCost is not None:
feeCurrencyCode = None
if market is not None:
settlingAsset = self.safe_value(market['info'], 'settling_asset', {})
feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': average,
'filled': None,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
})
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
orderType = type + '_order'
market = self.market(symbol)
request = {
'product_id': market['numericId'],
# 'limit_price': self.price_to_precision(symbol, price),
'size': self.amount_to_precision(symbol, amount),
'side': side,
'order_type': orderType,
# 'client_order_id': 'string',
# 'time_in_force': 'gtc', # gtc, ioc, fok
# 'post_only': 'false', # 'true',
# 'reduce_only': 'false', # 'true',
}
if type == 'limit':
request['limit_price'] = self.price_to_precision(symbol, price)
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
params = self.omit(params, ['clientOrderId', 'client_order_id'])
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "result":{
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":null,
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"open",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': int(id),
'product_id': market['numericId'],
# 'limit_price': self.price_to_precision(symbol, price),
# 'size': self.amount_to_precision(symbol, amount),
}
if amount is not None:
request['size'] = int(self.amount_to_precision(symbol, amount))
if price is not None:
request['limit_price'] = self.price_to_precision(symbol, price)
response = self.privatePutOrders(self.extend(request, params))
#
# {
# "success": True,
# "result": {
# "id": "ashb1212",
# "product_id": 27,
# "limit_price": "9200",
# "side": "buy",
# "size": 100,
# "unfilled_size": 50,
# "user_id": 1,
# "order_type": "limit_order",
# "state": "open",
# "created_at": "..."
# }
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'id': int(id),
'product_id': market['numericId'],
}
response = self.privateDeleteOrders(self.extend(request, params))
#
# {
# "result":{
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":"cancelled_by_user",
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"cancelled",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# },
# "success":true
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
# 'cancel_limit_orders': 'true',
# 'cancel_stop_orders': 'true',
}
response = self.privateDeleteOrdersAll(self.extend(request, params))
#
# {
# "result":{},
# "success":true
# }
#
return response
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        # Open orders live under GET /orders; shared plumbing in fetch_orders_with_method().
        return self.fetch_orders_with_method('privateGetOrders', symbol, since, limit, params)
    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        # Closed orders live under GET /orders/history; shared plumbing in fetch_orders_with_method().
        return self.fetch_orders_with_method('privateGetOrdersHistory', symbol, since, limit, params)
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'product_ids': market['id'], # comma-separated
# 'contract_types': types, # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
# 'order_types': types, # comma-separated, market, limit, stop_market, stop_limit, all_stop
# 'start_time': since * 1000,
# 'end_time': self.microseconds(),
# 'after': string, # after cursor for pagination
# 'before': string, # before cursor for pagination
# 'page_size': limit, # number of records per page
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_ids'] = market['numericId'] # accepts a comma-separated list of ids
if since is not None:
request['start_time'] = str(since) + '000'
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# {
# "success": True,
# "result": [
# {
# "id": "ashb1212",
# "product_id": 27,
# "limit_price": "9200",
# "side": "buy",
# "size": 100,
# "unfilled_size": 50,
# "user_id": 1,
# "order_type": "limit_order",
# "state": "open",
# "created_at": "..."
# }
# ],
# "meta": {
# "after": "string",
# "before": "string"
# }
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'product_ids': market['id'], # comma-separated
# 'contract_types': types, # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
# 'start_time': since * 1000,
# 'end_time': self.microseconds(),
# 'after': string, # after cursor for pagination
# 'before': string, # before cursor for pagination
# 'page_size': limit, # number of records per page
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_ids'] = market['numericId'] # accepts a comma-separated list of ids
if since is not None:
request['start_time'] = str(since) + '000'
if limit is not None:
request['page_size'] = limit
response = self.privateGetFills(self.extend(request, params))
#
# {
# "meta":{
# "after":null,
# "before":null,
# "limit":10,
# "total_count":2
# },
# "result":[
# {
# "commission":"0.008335000000000000",
# "created_at":"2020-11-16T19:07:19Z",
# "fill_type":"normal",
# "id":"e7ff05c233a74245b72381f8dd91d1ce",
# "meta_data":{
# "effective_commission_rate":"0.0005",
# "order_price":"16249",
# "order_size":1,
# "order_type":"market_order",
# "order_unfilled_size":0,
# "trading_fee_credits_used":"0"
# },
# "order_id":"152999629",
# "price":"16669",
# "product":{
# "contract_type":"perpetual_futures",
# "contract_unit_currency":"BTC",
# "contract_value":"0.001",
# "id":139,
# "notional_type":"vanilla",
# "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "symbol":"BTCUSDT",
# "tick_size":"0.5",
# "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
# },
# "product_id":139,
# "role":"taker",
# "side":"sell",
# "size":1
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_trades(result, market, since, limit)
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'asset_id': currency['numericId'],
# 'end_time': self.seconds(),
# 'after': 'string', # after cursor for pagination
# 'before': 'string', # before cursor for pagination
# 'page_size': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['asset_id'] = currency['numericId']
if limit is not None:
request['page_size'] = limit
response = self.privateGetWalletTransactions(self.extend(request, params))
#
# {
# "meta":{"after":null,"before":null,"limit":10,"total_count":1},
# "result":[
# {
# "amount":"29.889184",
# "asset_id":5,
# "balance":"29.889184",
# "created_at":"2020-11-15T21:25:01Z",
# "meta_data":{
# "deposit_id":3884,
# "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
# },
# "transaction_type":"deposit",
# "user_id":22142,
# "uuid":"70bb5679da3c4637884e2dc63efaa846"
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_ledger(result, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'pnl': 'pnl',
'deposit': 'transaction',
'withdrawal': 'transaction',
'commission': 'fee',
'conversion': 'trade',
# 'perpetual_futures_funding': 'perpetual_futures_funding',
# 'withdrawal_cancellation': 'withdrawal_cancellation',
'referral_bonus': 'referral',
'commission_rebate': 'rebate',
# 'promo_credit': 'promo_credit',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "amount":"29.889184",
# "asset_id":5,
# "balance":"29.889184",
# "created_at":"2020-11-15T21:25:01Z",
# "meta_data":{
# "deposit_id":3884,
# "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
# },
# "transaction_type":"deposit",
# "user_id":22142,
# "uuid":"70bb5679da3c4637884e2dc63efaa846"
# }
#
id = self.safe_string(item, 'uuid')
direction = None
account = None
metaData = self.safe_value(item, 'meta_data', {})
referenceId = self.safe_string(metaData, 'transaction_id')
referenceAccount = None
type = self.safe_string(item, 'transaction_type')
if (type == 'deposit') or (type == 'commission_rebate') or (type == 'referral_bonus') or (type == 'pnl') or (type == 'withdrawal_cancellation') or (type == 'promo_credit'):
direction = 'in'
elif (type == 'withdrawal') or (type == 'commission') or (type == 'conversion') or (type == 'perpetual_futures_funding'):
direction = 'out'
type = self.parse_ledger_entry_type(type)
currencyId = self.safe_integer(item, 'asset_id')
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
currency = self.safe_value(currenciesByNumericId, currencyId, currency)
code = None if (currency is None) else currency['code']
amount = self.safe_number(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'created_at'))
after = self.safe_number(item, 'balance')
before = max(0, after - amount)
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset_symbol': currency['id'],
}
response = self.privateGetDepositsAddress(self.extend(request, params))
#
# {
# "success":true,
# "result":{
# "id":19628,
# "user_id":22142,
# "address":"0x0eda26523397534f814d553a065d8e46b4188e9a",
# "status":"active",
# "updated_at":"2020-11-15T20:25:53.000Z",
# "created_at":"2020-11-15T20:25:53.000Z",
# "asset_symbol":"USDT",
# "custodian":"onc"
# }
# }
#
result = self.safe_value(response, 'result', {})
address = self.safe_string(result, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, headers and body for a request.

        Private calls are authenticated with an HMAC signature over
        method + timestamp + path(+ query string for GET/DELETE, or the
        JSON body for other methods).
        """
        requestPath = '/' + self.version + '/' + self.implode_params(path, params)
        url = self.urls['api'][api] + requestPath
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'private':
            self.check_required_credentials()
            timestamp = str(self.seconds())
            headers = {
                'api-key': self.apiKey,
                'timestamp': timestamp,
            }
            # the signed payload always starts with method + timestamp + path
            auth = method + timestamp + requestPath
            if (method == 'GET') or (method == 'DELETE'):
                if query:
                    queryString = '?' + self.urlencode(query)
                    auth += queryString
                    url += queryString
            else:
                # write methods sign the JSON body instead of the query string
                body = self.json(query)
                auth += body
                headers['Content-Type'] = 'application/json'
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            headers['signature'] = signature
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Inspect a decoded response and raise the mapped ccxt exception.

        A failed call looks like:
        {"error": {"code": "insufficient_margin", "context": {...}}, "success": false}
        """
        if response is None:
            return
        error = self.safe_value(response, 'error', {})
        errorCode = self.safe_string(error, 'code')
        if errorCode is not None:
            feedback = self.id + ' ' + body
            # try exact error-code matches first, then substring matches
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)
            raise ExchangeError(feedback)  # unknown message
| 41.496811 | 238 | 0.458225 |
ace00732cc81d14006a2c50befeadb4a75c3f1c2 | 1,579 | py | Python | package/simulation/stockData/sinusoidStockDataSource.py | iryzhkov/Reinforcement-Learning-Stock-Trading | dc4b962f584e75013bf951674297497e36bfecee | [
"MIT"
] | 1 | 2019-06-11T14:59:51.000Z | 2019-06-11T14:59:51.000Z | package/simulation/stockData/sinusoidStockDataSource.py | iryzhkov/Reinforcement-Learning-Stock-Trading | dc4b962f584e75013bf951674297497e36bfecee | [
"MIT"
] | 12 | 2020-02-11T05:32:09.000Z | 2020-02-15T22:49:59.000Z | package/simulation/stockData/sinusoidStockDataSource.py | iryzhkov/Reinforcement-Learning-Stock-Trading | dc4b962f584e75013bf951674297497e36bfecee | [
"MIT"
] | null | null | null | """Sinusoid Stock Data Source.
"""
from package.simulation.stockData.baseStockDataSource import StockDataSource
import logging
import pandas as pd
import math
from datetime import datetime, timedelta
# Class for generating sinusoid-like data
class SinusoidStockDataSource(StockDataSource):
    """Stock data source that synthesizes sinusoid-shaped price series."""

    def __init__(self, stocks_config: dict):
        """Initializer for Sinusoid Stock Data Source.

        Args:
            stocks_config (dict): per-stock settings; each entry carries
                'anchor_date', 'period', 'magnitude' and 'delta'.
        """
        super(SinusoidStockDataSource, self).__init__()
        self.stocks_config = stocks_config

    def prepareDataForDates(self, start_date, end_date, stocks):
        """Generate sinusoid High/Low data for every stock in the date range.

        Args:
            start_date (datetime): first date of the range (inclusive).
            end_date (datetime): last date of the range (inclusive).
            stocks (list): stock identifiers, keys into stocks_config.
        """
        num_days = (end_date - start_date).days + 1
        for stock in stocks:
            config = self.stocks_config[stock]
            values = {}
            for offset in range(num_days):
                date = start_date + timedelta(offset)
                # phase advances by 2*pi per 'period' days, measured from the anchor date
                phase = (date - config['anchor_date']).days * math.pi * 2 / config['period']
                value = math.sin(phase) * config['magnitude'] + config['delta']
                values[date] = [value, value]
            self.stock_data[stock] = pd.DataFrame.from_dict(data=values, orient='index', columns=['High', 'Low'])
if __name__ == "__main__":
pass
| 30.960784 | 117 | 0.634579 |
ace0074deeb5382a2d1cd127d0057877ac0d26e7 | 387 | py | Python | tests/test_torchxs.py | phzwart/torchxs | bf1c5b4b2278265f176f54d995ae9ebbf20a0616 | [
"BSD-3-Clause"
] | 1 | 2022-01-18T22:17:25.000Z | 2022-01-18T22:17:25.000Z | tests/test_torchxs.py | phzwart/torchxs | bf1c5b4b2278265f176f54d995ae9ebbf20a0616 | [
"BSD-3-Clause"
] | null | null | null | tests/test_torchxs.py | phzwart/torchxs | bf1c5b4b2278265f176f54d995ae9ebbf20a0616 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Tests for `torchxs` package."""
import unittest
from torchxs import torchxs
class TestTorchxs(unittest.TestCase):
    """Placeholder test case for the `torchxs` package.

    setUp/tearDown are empty stubs and test_000_something asserts nothing yet.
    """
    def setUp(self):
        """Set up test fixtures, if any."""
    def tearDown(self):
        """Tear down test fixtures, if any."""
    def test_000_something(self):
        """Test something (currently a no-op placeholder)."""
| 17.590909 | 46 | 0.622739 |
ace00758e1f4c12789fecdbf6d1c5e7cdda9d719 | 5,142 | py | Python | baekjoon/python/convex_hull_4181.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | baekjoon/python/convex_hull_4181.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | baekjoon/python/convex_hull_4181.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | # Title: Convex Hull
# Link: https://www.acmicpc.net/problem/4181
import sys
from collections import deque, defaultdict
from math import atan2
sys.setrecursionlimit(10 ** 6)  # deep-recursion headroom (competitive-programming habit)
read_single_int = lambda: int(sys.stdin.readline().strip())  # read one integer line from stdin
read_list_words = lambda: sys.stdin.readline().strip().split()  # read one whitespace-split line
class Point:
    """A 2-D point that also caches polar data for the hull scan.

    ``angle`` and ``dist_sq`` are measured relative to a pivot supplied to
    set_angle()/set_dist_sq(); both start at 0.
    """

    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y
        self.angle = 0
        self.dist_sq = 0

    def __str__(self):
        return '{} {}'.format(self.x, self.y)

    def draw(self, plt):
        # plot this point as a single marker of size 10
        plt.scatter(self.x, self.y, 10)

    def __eq__(self, other):
        # equal iff both coordinates match
        if self.x == other.x and self.y == other.y:
            return True
        return False

    def __lt__(self, other):
        # order by y first, then by x (lowest-then-leftmost comes first)
        if self.y != other.y:
            return self.y < other.y
        return self.x < other.x

    def set_angle(self, base):
        # cache the polar angle of this point around `base`
        self.angle = atan2(self.y - base.y, self.x - base.x)

    def set_dist_sq(self, base):
        # cache the squared Euclidean distance from `base`
        self.dist_sq = (self.y - base.y) ** 2 + (self.x - base.x) ** 2

    def get_dist_sq(self, other):
        # squared Euclidean distance to `other` (no caching)
        return (self.x - other.x) ** 2 + (self.y - other.y) ** 2
class ConvelHull:
    """Graham-scan-style convex hull over a list of Point objects.

    Note: "Convel" is the original author's spelling, kept for compatibility.

    Fix: the original ``get_hull`` had ``self.convel_hull = stack`` AFTER an
    always-returning ``while True`` loop — dead code — so ``get_area()``
    always crashed on ``self.convel_hull`` being None.  The hull is now
    stored (as a list) before every return.
    """

    def __init__(self, dots: list):
        self.dots = dots
        self.lowest_dot = None    # pivot: lowest y, then lowest x
        self.highest_dot = None
        self.convel_hull = None   # result of the last get_hull() call
        self.on_the_lines = []
        self.leftest_dot = None   # lowest point among the leftmost column

    def ccw(self, a: Point, b: Point, c: Point):
        # cross product of (a->b) x (a->c): >0 counter-clockwise turn,
        # <0 clockwise, ==0 collinear
        return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)

    def get_low_and_high_dot(self):
        # pivot extremes for the scan plus the leftmost/lowest output anchor
        self.lowest_dot = min(self.dots)
        self.highest_dot = max(self.dots)
        left_x = min(self.dots, key=lambda d: d.x).x
        left_x_dots = list(filter(lambda d: d.x == left_x, self.dots))
        self.leftest_dot = min(left_x_dots, key=lambda d: d.y)

    def print_dots(self, title: str):
        # debugging helper: dump all dots on one line under a title
        print(title)
        for dot in self.dots:
            print('{} '.format(dot.__str__()), end=' ')

    def sort_dots(self):
        # Sort by polar angle around the pivot, nearest-first within equal
        # angles; the group sharing the final angle is then re-appended in
        # reverse (farthest first) so the scan closes the hull correctly.
        for dot in self.dots:
            dot.set_angle(self.lowest_dot)
            dot.set_dist_sq(self.lowest_dot)
        self.dots = sorted(self.dots, key=lambda d: d.dist_sq)
        self.dots = sorted(self.dots, key=lambda d: d.angle)
        last_same_angles = []
        prev_angle = self.dots[-1].angle
        for dot in reversed(self.dots):
            if dot.angle == prev_angle:
                last_same_angles.append(dot)
            else:
                break
        self.dots = self.dots[:-len(last_same_angles)] + last_same_angles

    def get_hull(self, only_edge=True):
        """Return the hull, rotated to start at the leftmost-lowest dot.

        :param only_edge: when False, collinear boundary points are kept.
        """
        if len(self.dots) == 2:
            self.convel_hull = self.dots  # BUGFIX: remember result for get_area()
            return self.dots
        self.get_low_and_high_dot()
        self.sort_dots()
        self.dots += [self.lowest_dot]  # sentinel: close the sweep at the pivot
        self.dots = deque(self.dots)
        stack = deque()
        stack.append(self.dots.popleft())
        stack.append(self.dots.popleft())
        while self.dots:
            second = stack.pop()
            first = stack.pop()
            third = self.dots.popleft()
            is_ccw = self.ccw(first, second, third)
            if is_ccw > 0:
                # left turn: keep all three
                stack.append(first)
                stack.append(second)
                stack.append(third)
            elif not only_edge and is_ccw == 0:
                # collinear and requested: keep both, nearest first
                stack.append(first)
                ds = first.get_dist_sq(second)
                dt = first.get_dist_sq(third)
                if ds <= dt:
                    stack.append(second)
                    stack.append(third)
                else:
                    stack.append(third)
                    stack.append(second)
            else:
                # right turn (or unwanted collinear): drop `second`, retry `third`
                stack.append(first)
                if len(stack) == 1:
                    stack.append(third)
                else:
                    self.dots.appendleft(third)
        if len(stack) == 2:
            result = [stack[0], self.highest_dot]
            self.convel_hull = result  # BUGFIX: was never stored
            return result
        stack.pop()  # drop the duplicated pivot sentinel
        while True:
            if stack[0] == self.leftest_dot:
                # BUGFIX: store the hull (as a list, so get_area can slice it)
                # before returning; the original assignment was unreachable
                self.convel_hull = list(stack)
                return stack
            stack.rotate()

    def get_area(self):
        """Shoelace-formula area of the hull computed by the last get_hull()."""
        first, second = 0, 0
        self.convel_hull.append(self.convel_hull[0])  # close the polygon
        prev_dot = self.convel_hull[0]
        for dot in self.convel_hull[1:]:
            first += (prev_dot.x * dot.y)
            second += (prev_dot.y * dot.x)
            prev_dot = dot
        return 0.5 * abs(first - second)
def solution(dots: list):
    """Solve BOJ 4181: report hull size, then each hull vertex as 'x y'."""
    hull = ConvelHull(dots).get_hull(only_edge=False)
    lines = [str(len(hull))]
    for dot in hull:
        lines.append('{} {}'.format(dot.x, dot.y))
    return '\n'.join(lines)
def main():
    # Read n, then n lines of "x y flag"; only points flagged 'Y' are kept.
    n = read_single_int()
    dots = []
    for _ in range(n):
        x, y, c = read_list_words()
        if c == 'Y':
            dots.append(Point(int(x), int(y)))
    print(solution(dots))
if __name__ == '__main__':
main() | 27.945652 | 74 | 0.510891 |
ace007dae576123f00e8c14baa224ef3c6781f3a | 501 | py | Python | smoked/views.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | [
"MIT"
] | 6 | 2015-01-14T12:02:58.000Z | 2021-08-17T23:18:56.000Z | smoked/views.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | [
"MIT"
] | 7 | 2015-01-24T11:36:07.000Z | 2015-01-26T04:55:31.000Z | smoked/views.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | [
"MIT"
] | 1 | 2015-01-25T20:48:06.000Z | 2015-01-25T20:48:06.000Z | # coding: utf-8
from __future__ import unicode_literals
from django.template.response import TemplateResponse
from smoked.runner import run_tests
def smoked_results(request):
results = list(run_tests())
total = len(results)
success = sum(1 for item in results if 'error' not in item)
ctx = {
'results': results,
'total': total,
'success': success,
'failure': total - success,
}
return TemplateResponse(request, 'smoked/results.html', ctx)
| 22.772727 | 64 | 0.670659 |
ace007fbec11176b0107b93f5ab213fdcd603ff8 | 13,423 | py | Python | seven_sqlite.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | 1 | 2020-09-18T15:08:46.000Z | 2020-09-18T15:08:46.000Z | seven_sqlite.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | null | null | null | seven_sqlite.py | hollo08/stockstrategy | 09ece2457d653439a8ace80a6ac7dd4da9813846 | [
"MIT"
] | 2 | 2022-01-23T03:26:22.000Z | 2022-03-28T16:21:01.000Z | #! /usr/bin/env python
# -*- encoding: utf-8 -*-
import numpy as np
import pandas as pd
import tushare as ts
import pandas_datareader.data as web
import matplotlib.pyplot as plt
import mpl_finance as mpf
import datetime
import time
import talib
import sqlite3
from sqlalchemy import create_engine
from pandas.io import sql
import os
from seven import pro_daily_stock, json_to_str
# display / plotting configuration
pd.set_option('display.expand_frame_repr', False)  # False: do not wrap wide frames
pd.set_option('display.max_rows', 10)  # maximum number of rows shown
pd.set_option('display.max_columns', 6)  # maximum number of columns shown
pd.set_option('precision', 2)  # decimal places shown
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
conn = sqlite3.connect('stock-data.db')  # module-wide SQLite connection
c = conn.cursor()  # shared cursor on that connection
# tushare API token from the environment
token = os.environ.get('tushare_token')
pro = ts.pro_api(token)  # initialize the tushare pro client
def sqlite_test1():
    """Walk through core sqlite3 operations on a throwaway SZ000002 table:
    CREATE, PRAGMA, INSERT, SELECT, UPDATE, filtered SELECT, DELETE, DROP.

    Uses the module-level `conn` / `c`.  Cleanup now runs in `finally`, so
    the table is dropped and the connection closed even when a statement
    fails; the original bare `except:` silently swallowed every error and
    its cleanup `drop table` itself raised when CREATE had failed.
    """
    try:
        c.execute('''CREATE TABLE SZ000002
               (ID INT PRIMARY KEY     NOT NULL,
                TIME           TEXT    NOT NULL,
                CODE           TEXT    NOT NULL,
                HIGH           REAL,
                LOW            REAL,
                CLOSE          REAL,
                OPEN           REAL,
                DESCRIPTION    CHAR(50));''')
        conn.commit()
        # show the schema just created
        c.execute("PRAGMA table_info(SZ000002)")
        print(c.fetchall())
        # Insert four rows.  Parameter binding keeps CODE as the text
        # '000002'; the original unquoted literal 000002 was parsed by SQL
        # as the integer 2 despite the TEXT column.
        rows = [
            (1, '2019-1-1', '000002', 10.12, 10.12, 10.12, 10.12, 'Buy Signal'),
            (2, '2019-1-2', '000002', 10.13, 10.13, 10.13, 10.13, 'Sell Signal'),
            (3, '2019-1-3', '000002', 10.14, 10.14, 10.14, 10.14, 'Buy Signal'),
            (4, '2019-1-4', '000002', 10.15, 10.15, 10.15, 10.15, 'Sell Signal'),
        ]
        c.executemany("INSERT INTO SZ000002 (ID,TIME,CODE,HIGH,LOW,CLOSE,OPEN,DESCRIPTION) "
                      "VALUES (?,?,?,?,?,?,?,?)", rows)
        conn.commit()
        # read back all rows
        c.execute("select * from SZ000002")
        print(c.fetchall())
        # update one row and show the effect
        c.execute("UPDATE SZ000002 set DESCRIPTION = 'None' where ID=1")
        conn.commit()
        c.execute("select * from SZ000002")
        print(c.fetchall())
        # range-filtered projection
        cursor = conn.execute("SELECT id, time, code, description from SZ000002 where HIGH < 10.15 and HIGH > 10.12")
        for row in cursor:
            print("ID = {}; TIME = {}; CODE = {}; description = {};".format(row[0], row[1], row[2], row[3]))
        # delete one row and show the remainder
        c.execute("DELETE from SZ000002 where ID=2;")
        conn.commit()
        c.execute("select * from SZ000002")
        print(c.fetchall())
    finally:
        # IF EXISTS makes cleanup safe even when CREATE TABLE itself failed
        c.execute("DROP TABLE IF EXISTS SZ000002")
        conn.commit()
        conn.close()
# 获取格力电器日线行情数据
def sqlite_test2():
    """Round-trip Gree Electric (000651) daily bars through SQLite via pandas.

    Downloads one month of data, writes it with DataFrame.to_sql, reads it
    back with read_sql_query, then drops the demo table.
    """
    df_gldq = pro_daily_stock('000651.SZ', '20190101', '20190201')
    print(df_gldq.head())
    # persist the frame, replacing any previous run's table
    df_gldq.to_sql(name='STOCK000651',
                   con=conn,
                   index=False,
                   if_exists='replace')
    # read it back and show what was actually stored
    sql_gldq = pd.read_sql_query("select * from 'STOCK000651';", conn)
    # BUGFIX: the original re-printed df_gldq here, so the readback was
    # never displayed
    print(sql_gldq.head())
    # drop the demo table
    c.execute("drop table STOCK000651")
    conn.commit()
    # NOTE: this closes the module-wide connection; functions called after
    # this one would need to reconnect
    conn.close()
def sqlite_test3():
    """Bulk-load daily bars for a whole stock pool into SQLite.

    Prints the security master, then defines two loader helpers (a
    thread-pool variant and a throttled sequential variant).  Note all
    helpers are local to this function's scope.
    """
    def get_stock_code():
        # Pull the current security master.  NOTE(review): despite the name
        # this returns the *industry* column values, and 'list_date' is
        # listed twice in the fields string — looks unintentional.
        # field reference: https://tushare.pro/document/2?doc_id=25
        codes = pro.stock_basic(exchange='', list_status='L',
                                fields='ts_code,symbol,name,area,industry,fullname,enname,market,exchange,curr_type,'
                                       'list_status,list_date,list_date,is_hs').industry.values
        return codes
    print(get_stock_code())
    from concurrent.futures import ThreadPoolExecutor

    def map_fun(code, start='20190101', end='20190201', table_name='STOCK000001', con_name=conn):
        # Fetch one code's bars and append them to the table; errors are
        # reported and swallowed so one bad code does not stop the batch.
        try:
            frame = pro_daily_stock(code, start, end)
            frame.to_sql(table_name, con_name, index=False, if_exists='append')
        except:
            print("error code is %s" % code)

    def stock_to_sql(table_name, con_name):
        # Parallel variant: fan map_fun out over the stock pool.
        pool = json_to_str()  # load the stock-pool JSON file
        codes = [code for code in pool['股票'].values()]
        with ThreadPoolExecutor(max_workers=8) as executor:
            # executor.map returns a lazy generator; it is not consumed here
            executor.map(map_fun, codes)

    def stock_to_sql_for(table_name, con_name, start='20190101', end='20190201'):
        # Sequential variant: fetch-and-append per code with a short pause
        # between calls (presumably API rate limiting — TODO confirm), then
        # read the whole table back for inspection.
        pool = json_to_str()
        for code in pool['股票'].values():
            try:
                frame = pro.daily(ts_code=code, start_date=start, end_date=end)
                time.sleep(0.2)
                frame.to_sql(table_name, con_name, index=False, if_exists='append')
                print("right code is %s" % code)
            except:
                print("error code is %s" % code)
        whole_table = pd.read_sql_query("select * from " + table_name, con_name)
        print(whole_table)
def sqlite_test4():
    # Download/refresh the daily-bar table.
    # BUG: stock_to_sql_for is defined inside sqlite_test3's local scope,
    # so this call raises NameError; the helper must be hoisted to module
    # level for this to work.
    stock_to_sql_for('STOCK000001', conn)  # download/update the database
def sqlite_test5():
    """Screen one trading day for 9-10 CNY closes that gained more than 5%."""
    query = ("select * from 'STOCK000001' where close > 9 and close < 10 "
             "and pct_chg > 5 and trade_date == '20190128'")
    frame = pd.read_sql_query(query, conn)
    # show only the identifying and screening columns
    print(frame.loc[:, ['ts_code', 'trade_date', 'close', 'pct_chg', 'vol']])
    # typical output: a handful of codes, e.g. 000603.SZ / 002127.SZ /
    # 600992.SH / 601615.SH with close, pct_chg and vol columns
def sqlite_test6():
    """Bar chart: how many stocks gained more than 5% on each trading day."""
    frame = pd.read_sql_query("select * from 'STOCK000001' where pct_chg > 5", conn)
    per_day = frame.groupby('trade_date')['ts_code'].count()
    positions = range(len(per_day.index))
    plt.bar(positions, per_day.values, align='center', color='steelblue', alpha=0.8)
    plt.ylabel('count')
    plt.xticks(positions, per_day.index, rotation=45)
    plt.title('pct_chg > 5 time distribution')
    # annotate each bar with its count
    for pos, cnt in enumerate(per_day.values):
        plt.text(pos, cnt, '%s' % cnt, ha='center')
    plt.show()
def sqlite_test7():
    """Bar chart: how many stocks lost more than 5% on each trading day."""
    frame = pd.read_sql_query("select * from 'STOCK000001' where pct_chg < -5", conn)
    per_day = frame.groupby('trade_date')['ts_code'].count()
    positions = range(len(per_day.index))
    plt.bar(positions, per_day.values, align='center', color='steelblue', alpha=0.8)
    plt.ylabel('count')
    plt.xticks(positions, per_day.index, rotation=45)
    plt.title('pct_chg < -5 time distribution')
    # annotate each bar with its count
    for pos, cnt in enumerate(per_day.values):
        plt.text(pos, cnt, '%s' % cnt, ha='center')
    plt.show()
def sqlite_test8():
    # imooc.com notes series: Python basics - how to use the built-in SQLite database.
    # NOTE: this function opens its own connection, shadowing the module-level `conn`.
    conn = sqlite3.connect('stock-data.db')
    c = conn.cursor()
    # Create table 1
    c.execute('''CREATE TABLE STOCK600123
           (ID INT PRIMARY KEY NOT NULL,
           TIME TEXT NOT NULL,
           CODE INT NOT NULL,
           HIGH REAL,
           LOW REAL,
           CLOSE REAL,
           OPEN REAL,
           DESCRIPTION CHAR(50));''')
    conn.commit()
    # Inspect the table schema
    c.execute("PRAGMA table_info(STOCK600123)")
    print(c.fetchall())
    # Create table 2
    c.execute('''CREATE TABLE STOCK600111
           (ID INT PRIMARY KEY NOT NULL,
           TIME TEXT NOT NULL,
           CODE INT NOT NULL,
           HIGH REAL,
           LOW REAL,
           CLOSE REAL,
           OPEN REAL,
           DESCRIPTION CHAR(50));''')
    conn.commit()
    # List all tables
    c.execute("SELECT name from sqlite_master where type='table'")
    print(c.fetchall())
    # Insert rows
    c.execute("INSERT INTO STOCK600123 (ID,TIME,CODE,HIGH,LOW,CLOSE,OPEN,DESCRIPTION) \
      VALUES (1, '2019-1-1', 600123, 10.12, 10.12, 10.12, 10.12,'event1' )")
    c.execute("INSERT INTO STOCK600123 (ID,TIME,CODE,HIGH,LOW,CLOSE,OPEN,DESCRIPTION) \
      VALUES (2, '2019-1-2', 600123, 10.13, 10.13, 10.13, 10.13,'event2' )")
    c.execute("INSERT INTO STOCK600123 (ID,TIME,CODE,HIGH,LOW,CLOSE,OPEN,DESCRIPTION) \
      VALUES (3, '2019-1-3', 600123, 10.14, 10.14, 10.14, 10.14,'event3' )")
    c.execute("INSERT INTO STOCK600123 (ID,TIME,CODE,HIGH,LOW,CLOSE,OPEN,DESCRIPTION) \
      VALUES (4, '2019-1-4', 600123, 10.15, 10.15, 10.15, 10.15,'event4' )")
    conn.commit()
    c.execute("select * from STOCK600123")
    print(c.fetchall())
    conn.commit()
    # Update a row
    c.execute("UPDATE STOCK600123 set CODE = 600888 where ID=1")
    conn.commit()
    c.execute("select * from STOCK600123")
    print(c.fetchall())
    # Combined query with filter conditions
    c.execute("SELECT * from STOCK600123 where HIGH <= 10.13 and LOW >= 10.12")
    print(c.fetchall())
    # Fuzzy (LIKE) query
    c.execute("SELECT * from STOCK600123 where CODE like '%600123%'")
    print(c.fetchall())
    # Ordered query
    c.execute("SELECT * from STOCK600123 where CODE like '%600123%' order by HIGH desc")
    print(c.fetchall())
    # Aggregation and computation
    c.execute("SELECT count(*) from STOCK600123") # row count [(4,)]
    print(c.fetchall())
    c.execute("SELECT MAX(HIGH) from STOCK600123") # maximum [(10.15,)]
    print(c.fetchall())
    c.execute("SELECT SUM(HIGH) from STOCK600123") # sum [(40.54,)]
    print(c.fetchall())
    c.execute("SELECT AVG(HIGH) from STOCK600123") # average [(10.135,)]
    print(c.fetchall())
    # Group by
    c.execute("SELECT MAX(HIGH) from STOCK600123 where CODE = 600123 group by TIME")
    print(c.fetchall())
    # Paged query (LIMIT offset,count)
    c.execute("SELECT * from STOCK600123 limit 2,1")
    print(c.fetchall())
    # Delete rows
    c.execute("DELETE from STOCK600123 where ID=2;")
    conn.commit()
    # Select specific columns and iterate over the cursor
    cursor = conn.execute("SELECT id, time, code, description from STOCK600123")
    for row in cursor:
        print("ID = {}; TIME = {}; CODE = {}; description = {};\n".format(row[0], row[1], row[2], row[3]))
    # Drop both tables and close the connection
    c.execute("drop table STOCK600123")
    c.execute("drop table STOCK600111")
    conn.commit()
    conn.close()
# Entry point: run exactly one of the demo functions above (they share the
# module-level `conn`); the others are left commented out.
#sqlite_test1()
sqlite_test3()
ace009a3d18bc9155330599d37962d088d4ab0d6 | 249 | py | Python | 2019/05-harekaze/rev-scramble/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 25 | 2019-03-06T11:55:56.000Z | 2021-05-21T22:07:14.000Z | 2019/05-harekaze/rev-scramble/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2020-06-25T07:27:15.000Z | 2020-06-25T07:27:15.000Z | 2019/05-harekaze/rev-scramble/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2019-02-14T00:42:28.000Z | 2019-02-14T00:42:28.000Z | import angr
proj = angr.Project("./scramble")
# puts("Correct!") のアドレス
target_addr = 0x40073E
state = proj.factory.entry_state()
simgr = proj.factory.simgr(state)
simgr.explore(find=target_addr)
state = simgr.found[0]
print(state.posix.dumps(0))
| 19.153846 | 34 | 0.742972 |
ace00ac6b58e3c5f90e06eafa2830b896693d167 | 2,839 | py | Python | main10.py | aasworth/E01a-Control-Structues | f8bfad605683c54eba721f70acd71fb7d334f4e9 | [
"MIT"
] | null | null | null | main10.py | aasworth/E01a-Control-Structues | f8bfad605683c54eba721f70acd71fb7d334f4e9 | [
"MIT"
] | null | null | null | main10.py | aasworth/E01a-Control-Structues | f8bfad605683c54eba721f70acd71fb7d334f4e9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys, utils, random # import the modules we will need
utils.check_version((3,7)) # make sure we are running at least Python 3.7
utils.clear() # clear the screen
print('Greetings!') # Prints "Greetings!" to the terminal.
colors = ['red','orange','yellow','green','blue','violet','purple'] #Creates a string array of different color options
play_again = '' # Assigns the variable "play_again" to an empty string
best_count = sys.maxsize # the biggest number
while (play_again != 'n' and play_again != 'no'): # Starts a while loop with the conditional statements to make sure that play again is not set to no, and not to n
match_color = random.choice(colors) # This sets the variable match_color to a random choice of colors out of the color array
count = 0 # Sets the count to 0
color = '' # Sets the color to an empty string to begin
while (color != match_color): # Creates another while loop that runs to check what color the user enters
color = input("\nWhat is my favorite color? ") #\n is a special code that adds a new line, and asks the user for input
color = color.lower().strip() # This takes the user input and strips the empty spaces from the beginning and end, and puts it to all lowercase
count += 1 # Increments the variable count
if (color == match_color): # An if-loop to check if the user input color matches the random color chosen in line 14
print('Correct!') # Prints Correct
else: # A complement to the If/Else loop
print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count)) #Prints to the terminal letting the user know he/she did not guess correctly, and how many times they have guessed
print('\nYou guessed it in {0} tries!'.format(count)) # This prints to the user that they have guessed correctly, and how many guesses it took. It doesn't need to be in the If/Else statement because all other cases are check already
if (count < best_count): #This is an if statement to check if the current count is lower than the best count recorded
print('This was your best guess so far!') #if the previous line returns true, it prints to the user that it was their best count of guesses
best_count = count #Because it was the best guess count, it makes that guess count the best_count variable
play_again = input("\nWould you like to play again? ").lower().strip() #This asks the user if they would like to play again, and strips blank space off it and makes it all lowercase if needed.
print('Thanks for playing!') #Prints out to the user that the game is over, only if they answer no that they would not like to play again. | 94.633333 | 238 | 0.686509 |
ace00afc2c6c0b7075e9e14795610d0c8c38fc74 | 2,933 | py | Python | var/spack/repos/builtin/packages/py-pylint/package.py | electronicvisions/spack | d6121eb35b4948f7d8aef7ec7a305a5123a7439e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/py-pylint/package.py | einc-eu/spack | 15468b92ed21d970c0111ae19144e85e66746433 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-05-28T06:39:59.000Z | 2022-03-30T15:12:35.000Z | var/spack/repos/builtin/packages/py-pylint/package.py | einc-eu/spack | 15468b92ed21d970c0111ae19144e85e66746433 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPylint(PythonPackage):
    """python code static checker"""
    pypi = "pylint/pylint-1.6.5.tar.gz"
    # Known-good releases with their sdist checksums.
    version('2.5.3', sha256='7dd78437f2d8d019717dbf287772d0b2dbdfd13fc016aa7faa08d67bccc46adc')
    version('2.4.4', sha256='3db5468ad013380e987410a8d6956226963aed94ecb5f9d3a28acca6d9ac36cd')
    version('2.3.1', sha256='723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1')
    version('2.3.0', sha256='ee80c7af4f127b2a480d83010c9f0e97beb8eaa652b78c2837d3ed30b12e1182')
    version('1.9.4', sha256='ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93')
    version('1.7.2', sha256='ea6afb93a9ed810cf52ff3838eb3a15e2bf6a81b80de0eaede1ce442caa5ca69')
    version('1.6.5', sha256='a673984a8dd78e4a8b8cfdee5359a1309d833cf38405008f4a249994a8456719')
    version('1.4.3', sha256='1dce8c143a5aa15e0638887c2b395e2e823223c63ebaf8d5f432a99e44b29f60')
    version('1.4.1', sha256='3e383060edd432cbbd0e8bd686f5facfe918047ffe1bb401ab5897cb6ee0f030')
    extends('python', ignore=r'bin/pytest')
    # Supported Python interpreter ranges per pylint major series.
    depends_on('python@2.7:2.8,3.4:3.6', when='@:1', type=('build', 'run'))
    depends_on('python@3.4:', when='@2:', type=('build', 'run'))
    depends_on('python@3.5.0:', when='@2.4.0:')
    # astroid pins track the matching pylint release series.
    depends_on('py-astroid', type=('build', 'run'))
    # note there is no working version of astroid for this
    depends_on('py-astroid@1.5.1:', type=('build', 'run'), when='@1.7:')
    depends_on('py-astroid@1.6:1.9', type=('build', 'run'), when='@1.9.4')
    depends_on('py-astroid@2.0:', type=('build', 'run'), when='@2.2.0:')
    depends_on('py-astroid@2.2.0:2.999.999', type=('build', 'run'), when='@2.3.0:')
    depends_on('py-astroid@2.3.0:2.3.999', type=('build', 'run'), when='@2.4.0:2.4.999')
    depends_on('py-astroid@2.4.0:2.4.999', type=('build', 'run'), when='@2.5.0:')
    depends_on('py-six', type=('build', 'run'), when='@1:')
    depends_on('py-isort@4.2.5:', type=('build', 'run'))
    depends_on('py-isort@4.2.5:4.999', when='@2.3.1:', type=('build', 'run'))
    depends_on('py-mccabe', type=('build', 'run'))
    depends_on('py-mccabe@0.6.0:0.6.999', when='@2.3.1:', type=('build', 'run'))
    depends_on('py-editdistance', type=('build', 'run'), when='@:1.7')
    depends_on('py-setuptools@17.1:', type='build')
    # depends_on('py-setuptools-scm@1.15.0:', type='build')
    # Backports required only on Python 2 / old Python 3.
    depends_on('py-configparser', when='^python@:2.8', type=('build', 'run'))
    depends_on('py-backports-functools-lru-cache', when='^python@:2.8', type=('build', 'run'))
    depends_on('py-singledispatch', when='^python@:3.3.99', type=('build', 'run'))
    # FIXME: Extra EV (fork-local addition? -- confirm whether upstream Spack needs it)
    depends_on('py-toml@0.7.1:', type=('build', 'run'), when='@2.5.0:')
    depends_on('py-pytest-runner', type=('build', 'run'))
| 58.66 | 95 | 0.674395 |
ace00b01bd08027ad902cd132fbfa3593ea1468b | 1,656 | py | Python | tests/test_pm_imagenet.py | zccoder/pretrained-models.pytorch | 56aa8c921819d14fb36d7248ab71e191b37cb146 | [
"BSD-3-Clause"
] | 1 | 2019-01-17T15:44:11.000Z | 2019-01-17T15:44:11.000Z | tests/test_pm_imagenet.py | zccoder/pretrained-models.pytorch | 56aa8c921819d14fb36d7248ab71e191b37cb146 | [
"BSD-3-Clause"
] | null | null | null | tests/test_pm_imagenet.py | zccoder/pretrained-models.pytorch | 56aa8c921819d14fb36d7248ab71e191b37cb146 | [
"BSD-3-Clause"
] | 1 | 2019-01-22T01:41:01.000Z | 2019-01-22T01:41:01.000Z | import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
# Build the pytest parameter list: every (model_name, pretrained) pair
# that ships ImageNet (or ImageNet+5k) weights.
pm_args = []
for model_name in pm.model_names:
    for pretrained in pm.pretrained_settings[model_name]:
        if pretrained in ['imagenet', 'imagenet+5k']:
            pm_args.append((model_name, pretrained))
# Shared fixture image loaded once for all parametrized tests.
img = utils.LoadImage()('data/cat.jpg')
def equal(x, y):
    """Return a boolean tensor: True when x and y are within 1e-6 in p-norm distance."""
    distance = torch.dist(x, y)
    return distance <= 1e-6
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
    """Smoke-test one pretrained model: build it, run the fixture image through
    it, and check logits shape plus features()/logits() consistency."""
    print('test_pm_imagenet("{}")'.format(model_name))
    net = pm.__dict__[model_name](
        num_classes=1000,
        pretrained=pretrained)
    net.eval()
    if 'nasnetalarge' == model_name:
        # nasnetalarge too big for travis
        return
    if 'pnasnet5large' == model_name:
        # pnasnet5large too big for travis
        return
    # Preprocess the shared cat image with the model's own transform and batch it.
    tensor = utils.TransformImage(net)(img)
    tensor = tensor.unsqueeze(0)
    x = Variable(tensor, requires_grad=False)
    out_logits = net(x)
    if 'squeezenet' in model_name:
        # Conv2d without view at the end
        assert out_logits.shape == torch.Size([1,1000,1,1])
        return
    assert out_logits.shape == torch.Size([1,1000])
    # features() followed by logits() must match the full forward pass.
    out_feats = net.features(x)
    out_logits_2 = net.logits(out_feats)
    assert equal(out_logits, out_logits_2)
    if 'dpn' in model_name:
        # Conv2d instead of Linear
        return
    # Swapping last_linear must change the output dimensionality accordingly.
    net.last_linear = nn.Linear(
        net.last_linear.in_features,
        10)
    out_logits_3 = net.logits(out_feats)
    assert out_logits_3.shape == torch.Size([1,10])
ace00dd443212ada29bd1d7c6074d50d927ae50d | 325 | py | Python | users/migrations/0007_remove_profile_hash.py | dizzyplay/djangoboard | 99eb7cedca2772d78577974051b78dd522b90bd3 | [
"MIT"
] | null | null | null | users/migrations/0007_remove_profile_hash.py | dizzyplay/djangoboard | 99eb7cedca2772d78577974051b78dd522b90bd3 | [
"MIT"
] | 6 | 2020-02-11T23:38:07.000Z | 2021-09-08T00:42:36.000Z | users/migrations/0007_remove_profile_hash.py | dizzyplay/djangoboard | 99eb7cedca2772d78577974051b78dd522b90bd3 | [
"MIT"
] | 1 | 2019-03-21T17:43:42.000Z | 2019-03-21T17:43:42.000Z | # Generated by Django 2.1.2 on 2018-11-14 05:12
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `hash` field from the users.Profile model."""
    dependencies = [
        ('users', '0006_auto_20181114_1230'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='hash',
        ),
    ]
ace00f47081103d2fef657e2f3f867ec63afab71 | 1,624 | py | Python | examples/run_Cp2kPhonopyWorkChain.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 2 | 2019-03-05T10:53:46.000Z | 2019-05-21T12:35:52.000Z | examples/run_Cp2kPhonopyWorkChain.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 59 | 2019-11-26T15:25:36.000Z | 2022-02-22T12:37:18.000Z | examples/run_Cp2kPhonopyWorkChain.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 6 | 2019-12-04T14:43:28.000Z | 2022-01-20T18:08:12.000Z | # -*- coding: utf-8 -*-
""" Test/example for the Cp2kPhonopyWorkChain"""
import click
from aiida.engine import run
from aiida.orm import load_node
from aiida.plugins import WorkflowFactory, DataFactory
from aiida import cmdline
Cp2kPhonopyWorkChain = WorkflowFactory('lsmo.cp2k_phonopy')
Str = DataFactory('str') # pylint: disable=invalid-name
Int = DataFactory('int') # pylint: disable=invalid-name
def run_cp2k_phonopy(cp2k_code, structure_pk):
    """Test the workchain in both serial and parallel mode.

    Args:
        cp2k_code: AiiDA Code node for the CP2K executable.
        structure_pk: pk of the structure node to load as input.
    """
    for mode in ['serial', 'parallel']:
        print(f'>>> Compute forces + 3 displacements for water - MODE: {mode}')
        builder = Cp2kPhonopyWorkChain.get_builder()
        builder.structure = load_node(structure_pk)
        builder.mode = Str(mode)
        builder.max_displacements = Int(3)  # Compute only few displacements (instead of 6N) for sake of time
        builder.cp2k_base.cp2k.code = cp2k_code
        # Minimal single-process allocation; 3-minute wall-clock cap per run.
        builder.cp2k_base.cp2k.metadata.options.resources = {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1,
        }
        builder.cp2k_base.cp2k.metadata.options.max_wallclock_seconds = 1 * 3 * 60
        run(builder)
@click.command('cli')
@cmdline.utils.decorators.with_dbenv()
@click.option('--cp2k-code', type=cmdline.params.types.CodeParamType())
@click.option('--structure-pk')  # use the pk of a CifData/StructureData which is the descendant of a CP2K calculation
def cli(cp2k_code, structure_pk):
    """Click interface"""
    run_cp2k_phonopy(cp2k_code, structure_pk)
if __name__ == '__main__':
    cli()  # pylint: disable=no-value-for-parameter
| 36.088889 | 118 | 0.706281 |
ace0104f1c8c50c2ca205cb50a935e22d795fe2a | 1,030 | py | Python | 12865/solution.py | bossm0n5t3r/BOJ | 03132388a0c76ef66d6b0dec2053aeca65c4aee6 | [
"MIT"
] | 2 | 2020-01-14T07:27:25.000Z | 2020-02-12T07:49:58.000Z | 12865/solution.py | bossm0n5t3r/BOJ | 03132388a0c76ef66d6b0dec2053aeca65c4aee6 | [
"MIT"
] | 1 | 2020-01-14T07:29:30.000Z | 2021-11-28T11:29:08.000Z | 12865/solution.py | bossm0n5t3r/BOJ | 03132388a0c76ef66d6b0dec2053aeca65c4aee6 | [
"MIT"
] | null | null | null | import sys
def sol():
    """0/1 knapsack via a sparse dict mapping value -> lightest weight achieving it.

    Reads "N K" then N lines of "weight value" from stdin; prints the best value.
    """
    # sys.stdin = open("./12865/input.txt")
    read = sys.stdin.readline
    n_items, capacity = map(int, read().split())
    best_weight = {0: 0}  # value -> minimal total weight reaching that value
    for _ in range(n_items):
        weight, value = map(int, read().split())
        updates = {}
        for got_value, got_weight in best_weight.items():
            cand_value = got_value + value
            cand_weight = got_weight + weight
            # Keep the candidate only if it fits and improves the stored weight.
            if cand_weight < best_weight.get(cand_value, capacity + 1):
                updates[cand_value] = cand_weight
        best_weight.update(updates)
    print(max(best_weight))
def sol_original():
    """Classic O(N*K) tabular 0/1 knapsack.

    table[i][j] = best value using the first i items within weight budget j.
    Reads "N K" then N lines of "weight value" from stdin; prints table[N][K].
    """
    # sys.stdin = open("./12865/input.txt")
    read = sys.stdin.readline
    n_items, capacity = map(int, read().split())
    weights = [0] * (n_items + 1)
    values = [0] * (n_items + 1)
    for i in range(1, n_items + 1):
        weights[i], values[i] = map(int, read().split())
    table = [[0] * (capacity + 1) for _ in range(n_items + 1)]
    for i in range(1, n_items + 1):
        for budget in range(capacity + 1):
            skip_item = table[i - 1][budget]
            if weights[i] > budget:
                table[i][budget] = skip_item
            else:
                take_item = table[i - 1][budget - weights[i]] + values[i]
                table[i][budget] = max(skip_item, take_item)
    print(table[n_items][capacity])
if __name__ == "__main__":
sol()
sol_original()
| 24.52381 | 72 | 0.425243 |
ace01102960c429287dbab92501d3e3fda185a1f | 13,534 | py | Python | buildkite/aggregate_incompatible_flags_test_result.py | jgertm/continuous-integration | 0c17db9d04a193c18594f5fd140111271c63a328 | [
"Apache-2.0"
] | 198 | 2015-10-08T09:20:49.000Z | 2022-03-09T10:29:54.000Z | buildkite/aggregate_incompatible_flags_test_result.py | jgertm/continuous-integration | 0c17db9d04a193c18594f5fd140111271c63a328 | [
"Apache-2.0"
] | 882 | 2016-03-17T09:34:02.000Z | 2022-03-29T17:42:59.000Z | buildkite/aggregate_incompatible_flags_test_result.py | jgertm/continuous-integration | 0c17db9d04a193c18594f5fd140111271c63a328 | [
"Apache-2.0"
] | 132 | 2016-03-30T21:04:40.000Z | 2022-03-22T13:16:30.000Z | #!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
import subprocess
import sys
import threading
import bazelci
# Buildkite org/pipeline come from the agent environment; missing vars raise KeyError.
BUILDKITE_ORG = os.environ["BUILDKITE_ORGANIZATION_SLUG"]
PIPELINE = os.environ["BUILDKITE_PIPELINE_SLUG"]
# When USE_BAZELISK_MIGRATE=FAIL, main() exits non-zero if any migration is needed.
FAIL_IF_MIGRATION_REQUIRED = os.environ.get("USE_BAZELISK_MIGRATE", "").upper() == "FAIL"
# Matches e.g. "--incompatible_foo (Bazel 0.25: https://github.com/...)";
# the "(Bazel <version>: <url>)" part is optional.
INCOMPATIBLE_FLAG_LINE_PATTERN = re.compile(
    r"\s*(?P<flag>--incompatible_\S+)\s*(\(Bazel (?P<version>.+?): (?P<url>.+?)\))?"
)
# bazel_version: version in which the flag is flipped; issue_url: tracking GitHub issue.
FlagDetails = collections.namedtuple("FlagDetails", ["bazel_version", "issue_url"])
class LogFetcher(threading.Thread):
    """Thread that downloads the build log for one job; result lands in self.log."""
    def __init__(self, job, client):
        threading.Thread.__init__(self)
        self.job = job
        self.client = client
        self.log = None  # filled by run(); None until the fetch completes
    def run(self):
        self.log = self.client.get_build_log(self.job)
def process_build_log(failed_jobs_per_flag, already_failing_jobs, log, job, details_per_flag):
    """Parse one job's log, recording per-flag failures and flag details in place.

    Mutates failed_jobs_per_flag (flag -> {job id -> job}), already_failing_jobs
    (list of jobs broken even without flags) and details_per_flag (flag -> FlagDetails).
    """
    if "Failure: Command failed, even without incompatible flags." in log:
        already_failing_jobs.append(job)
    def handle_failing_flags(line, details_per_flag):
        # Same parsing as extract_flag_details, but additionally marks this job
        # as failing for the extracted flag.
        flag = extract_flag_details(line, details_per_flag)
        if flag:
            failed_jobs_per_flag[flag][job["id"]] = job
    # bazelisk --migrate might run for multiple times for run / build / test,
    # so there could be several "+++ Result" sections.
    # Process sections from last to first by repeatedly truncating the log at
    # the last "+++ Result" marker.
    while "+++ Result" in log:
        index_success = log.rfind("Command was successful with the following flags:")
        index_failure = log.rfind("Migration is needed for the following flags:")
        if index_success == -1 or index_failure == -1:
            raise bazelci.BuildkiteException("Cannot recognize log of " + job["web_url"])
        extract_all_flags(log[index_success:index_failure], extract_flag_details, details_per_flag)
        extract_all_flags(log[index_failure:], handle_failing_flags, details_per_flag)
        log = log[0 : log.rfind("+++ Result")]
    # If the job failed for other reasons, we add it into already failing jobs.
    if job["state"] == "failed":
        already_failing_jobs.append(job)
def extract_all_flags(log, line_callback, details_per_flag):
    """Invoke line_callback(line, details_per_flag) on every newline-split line of log."""
    lines = log.split("\n")
    for single_line in lines:
        line_callback(single_line, details_per_flag)
def extract_flag_details(line, details_per_flag):
    """Parse one log line; record the flag's FlagDetails and return the flag name.

    Returns None (implicitly) when the line does not match
    INCOMPATIBLE_FLAG_LINE_PATTERN. An existing entry is only overwritten when
    both of its fields are None -- a FlagDetails namedtuple compares equal to
    the plain tuple (None, None), which is what the .get() default relies on.
    """
    match = INCOMPATIBLE_FLAG_LINE_PATTERN.match(line)
    if match:
        flag = match.group("flag")
        if details_per_flag.get(flag, (None, None)) == (None, None):
            details_per_flag[flag] = FlagDetails(
                bazel_version=match.group("version"), issue_url=match.group("url")
            )
        return flag
def get_html_link_text(content, link):
    """Render an HTML anchor labelled *content* that opens *link* in a new tab."""
    return '<a href="{}" target="_blank">{}</a>'.format(link, content)
# Check if any of the given jobs needs to be migrated by the Bazel team
def needs_bazel_team_migrate(jobs):
    """Return True if any job belongs to a downstream project owned by the Bazel team."""
    for job in jobs:
        pipeline, _ = get_pipeline_and_platform(job)
        # "owned_by_bazel" is an optional key in the DOWNSTREAM_PROJECTS config.
        if pipeline in bazelci.DOWNSTREAM_PROJECTS and bazelci.DOWNSTREAM_PROJECTS[pipeline].get(
            "owned_by_bazel"
        ):
            return True
    return False
def print_flags_ready_to_flip(failed_jobs_per_flag, details_per_flag):
    """Annotate the build with flags that broke no project / no Bazel-owned project.

    Each section header counts as one list entry, so a length of 1 means the
    section collected nothing and is dropped.
    """
    info_text1 = ["#### The following flags didn't break any passing projects"]
    for flag in sorted(list(details_per_flag.keys())):
        if flag not in failed_jobs_per_flag:
            html_link_text = get_html_link_text(":github:", details_per_flag[flag].issue_url)
            info_text1.append(f"* **{flag}** {html_link_text}")
    if len(info_text1) == 1:
        info_text1 = []
    info_text2 = [
        "#### The following flags didn't break any passing Bazel team owned/co-owned projects"
    ]
    for flag, jobs in failed_jobs_per_flag.items():
        if not needs_bazel_team_migrate(jobs.values()):
            failed_cnt = len(jobs)
            # Singular/plural agreement for "job needs" vs "jobs need".
            s1 = "" if failed_cnt == 1 else "s"
            s2 = "s" if failed_cnt == 1 else ""
            html_link_text = get_html_link_text(":github:", details_per_flag[flag].issue_url)
            info_text2.append(
                f"* **{flag}** {html_link_text} ({failed_cnt} other job{s1} need{s2} migration)"
            )
    if len(info_text2) == 1:
        info_text2 = []
    print_info("flags_ready_to_flip", "success", info_text1 + info_text2)
def print_already_fail_jobs(already_failing_jobs):
    """Annotate the build with jobs that fail even without incompatible flags."""
    info_text = ["#### The following jobs already fail without incompatible flags"]
    info_text += merge_and_format_jobs(already_failing_jobs, "* **{}**: {}")
    # Only the header means nothing to report.
    if len(info_text) == 1:
        return
    print_info("already_fail_jobs", "warning", info_text)
def print_projects_need_to_migrate(failed_jobs_per_flag):
    """Annotate the build with a collapsible list of all projects needing migration."""
    info_text = ["#### The following projects need migration"]
    # Deduplicate jobs by name across all flags.
    jobs_need_migration = {}
    for jobs in failed_jobs_per_flag.values():
        for job in jobs.values():
            jobs_need_migration[job["name"]] = job
    job_list = jobs_need_migration.values()
    job_num = len(job_list)
    if job_num == 0:
        return
    # Count distinct pipelines (projects) among the failing jobs.
    projects = set()
    for job in job_list:
        project, _ = get_pipeline_and_platform(job)
        projects.add(project)
    project_num = len(projects)
    # Singular/plural agreement for "project needs" vs "projects need".
    s1 = "" if project_num == 1 else "s"
    s2 = "s" if project_num == 1 else ""
    info_text.append(
        f"<details><summary>{project_num} project{s1} need{s2} migration, click to see details</summary><ul>"
    )
    entries = merge_and_format_jobs(job_list, " <li><strong>{}</strong>: {}</li>")
    info_text += entries
    info_text.append("</ul></details>")
    info_str = "\n".join(info_text)
    # Emitted directly (not via print_info) so the <details> block stays one annotation.
    bazelci.execute_command(
        [
            "buildkite-agent",
            "annotate",
            "--append",
            "--context=projects_need_migration",
            "--style=error",
            f"\n{info_str}\n",
        ]
    )
def print_flags_need_to_migrate(failed_jobs_per_flag, details_per_flag):
    """Emit one annotation per flag listing the pipelines/platforms it breaks."""
    # The info box printed later is above info box printed before,
    # so reverse the flag list to maintain the same order.
    printed_flag_boxes = False
    for flag in sorted(list(failed_jobs_per_flag.keys()), reverse=True):
        jobs = failed_jobs_per_flag[flag]
        if jobs:
            github_url = details_per_flag[flag].issue_url
            info_text = [f"* **{flag}** " + get_html_link_text(":github:", github_url)]
            jobs_per_pipeline = merge_jobs(jobs.values())
            for pipeline, platforms in jobs_per_pipeline.items():
                # Mark pipelines the Bazel team itself must migrate.
                bazel_mark = ""
                if pipeline in bazelci.DOWNSTREAM_PROJECTS and bazelci.DOWNSTREAM_PROJECTS[
                    pipeline
                ].get("owned_by_bazel"):
                    bazel_mark = ":bazel:"
                platforms_text = ", ".join(platforms)
                info_text.append(f" - {bazel_mark}**{pipeline}**: {platforms_text}")
            # Use flag as the context so that each flag gets a different info box.
            print_info(flag, "error", info_text)
            printed_flag_boxes = True
    if not printed_flag_boxes:
        return
    # Summary header box, printed last so it appears on top.
    info_text = [
        "#### Downstream projects need to migrate for the following flags:",
        "Projects marked with :bazel: need to be migrated by the Bazel team.",
    ]
    print_info("flags_need_to_migrate", "error", info_text)
def merge_jobs(jobs):
    """Group jobs by pipeline: pipeline name -> list of HTML platform links.

    Jobs are sorted case-insensitively by name so platform links within a
    pipeline appear in a stable order.
    """
    jobs_per_pipeline = collections.defaultdict(list)
    for job in sorted(jobs, key=lambda s: s["name"].lower()):
        pipeline, platform = get_pipeline_and_platform(job)
        jobs_per_pipeline[pipeline].append(get_html_link_text(platform, job["web_url"]))
    return jobs_per_pipeline
def merge_and_format_jobs(jobs, line_pattern):
    """Format grouped jobs: one line per pipeline via line_pattern.format(pipeline, platforms)."""
    # Merges all jobs for a single pipeline into one line.
    # Example:
    # pipeline (platform1)
    # pipeline (platform2)
    # pipeline (platform3)
    # with line_pattern ">> {}: {}" becomes
    # >> pipeline: platform1, platform2, platform3
    jobs_per_pipeline = merge_jobs(jobs)
    return [
        line_pattern.format(pipeline, ", ".join(platforms))
        for pipeline, platforms in jobs_per_pipeline.items()
    ]
def get_pipeline_and_platform(job):
    """Split a job name into (pipeline name, platform emoji label).

    The first matching platform emoji from bazelci.PLATFORMS is stripped from
    the name; platform stays "" when none matches. The remaining name is then
    truncated at the first "-" or "(" and whitespace-trimmed.
    """
    name = job["name"]
    platform = ""
    for p in bazelci.PLATFORMS.values():
        platform_label = p.get("emoji-name")
        if platform_label in name:
            platform = platform_label
            name = name.replace(platform_label, "")
            break
    name = name.partition("-")[0].partition("(")[0].strip()
    return name, platform
def print_info(context, style, info):
    """Append the lines in *info* as a Buildkite annotation in chunks.

    Args:
        context: annotation context id (one box per context).
        style: Buildkite annotation style ("success", "warning", "error", ...).
        info: list of markdown lines.
    """
    # CHUNK_SIZE is to prevent buildkite-agent "argument list too long" error
    CHUNK_SIZE = 20
    for i in range(0, len(info), CHUNK_SIZE):
        info_str = "\n".join(info[i : i + CHUNK_SIZE])
        bazelci.execute_command(
            [
                "buildkite-agent",
                "annotate",
                "--append",
                f"--context={context}",
                f"--style={style}",
                f"\n{info_str}\n",
            ]
        )
def analyze_logs(build_number, client):
    """Fetch all job logs of a build concurrently and aggregate migration results.

    Returns:
        (already_failing_jobs, failed_jobs_per_flag, details_per_flag) as
        produced by process_build_log across all jobs.
    """
    build_info = client.get_build_info(build_number)
    already_failing_jobs = []
    # dict(flag name -> dict(job id -> job))
    failed_jobs_per_flag = collections.defaultdict(dict)
    # dict(flag name -> (Bazel version where it's flipped, GitHub issue URL))
    details_per_flag = {}
    # One fetcher thread per job so log downloads overlap.
    threads = []
    for job in build_info["jobs"]:
        # Some irrelevant job has no "state" field
        if "state" in job:
            thread = LogFetcher(job, client)
            threads.append(thread)
            thread.start()
    for thread in threads:
        thread.join()
        process_build_log(
            failed_jobs_per_flag, already_failing_jobs, thread.log, thread.job, details_per_flag
        )
    return already_failing_jobs, failed_jobs_per_flag, details_per_flag
def handle_already_flipped_flags(failed_jobs_per_flag, details_per_flag):
    """Drop flags that have already been flipped in the current Bazel version.

    Bazelisk may report already flipped flags if a project pins an old Bazel
    version via its .bazelversion file.

    Args:
        failed_jobs_per_flag: dict(flag name -> dict(job id -> job)).
        details_per_flag: dict(flag name -> FlagDetails).

    Returns:
        (failed_jobs_per_flag, details_per_flag) filtered down to flags whose
        flip version is not older than the current Bazel major version.
    """
    current_major_version = get_bazel_major_version()
    failed_jobs_for_new_flags = {}
    details_for_new_flags = {}
    for flag, details in details_per_flag.items():
        # Compare versions numerically: plain string comparison misorders
        # versions once a component reaches two digits (e.g. "0.10" < "0.9").
        if not details.bazel_version or _version_key(details.bazel_version) < _version_key(
            current_major_version
        ):
            # TOOD(fweikert): maybe display a Buildkite annotation
            bazelci.eprint(
                "Ignoring {} since it has already been flipped in Bazel {} (latest is {}).".format(
                    flag, details.bazel_version, current_major_version
                )
            )
            continue
        details_for_new_flags[flag] = details
        if flag in failed_jobs_per_flag:
            failed_jobs_for_new_flags[flag] = failed_jobs_per_flag[flag]
    return failed_jobs_for_new_flags, details_for_new_flags


def _version_key(version):
    """Return a tuple of ints for numeric comparison of versions like "0.21".

    Non-numeric components map to -1 so malformed versions compare as older
    than any real release instead of raising.
    """
    return tuple(int(part) if part.isdigit() else -1 for part in str(version).split("."))
def get_bazel_major_version():
    """Return the CI Bazel major version string, e.g. "0.21" for Bazel 0.21.0."""
    # Get bazel major version on CI, eg. 0.21 from "Build label: 0.21.0\n..."
    # The rc files are disabled so local configuration cannot change the output.
    output = subprocess.check_output(
        ["bazel", "--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
    ).decode("utf-8")
    # Third whitespace-separated token is the full version; drop the last component.
    return output.split()[2].rsplit(".", 1)[0]
def print_result_info(already_failing_jobs, failed_jobs_per_flag, details_per_flag):
    """Emit all result annotations; return True when any flag still needs migration."""
    # Annotations stack newest-on-top, so these are printed in reverse display order.
    print_flags_need_to_migrate(failed_jobs_per_flag, details_per_flag)
    print_projects_need_to_migrate(failed_jobs_per_flag)
    print_already_fail_jobs(already_failing_jobs)
    print_flags_ready_to_flip(failed_jobs_per_flag, details_per_flag)
    return bool(failed_jobs_per_flag)
def main(argv=None):
    """CLI entry point.

    Exit codes: 0 success, 1 Buildkite API error, 2 missing --build_number,
    3 migration required while USE_BAZELISK_MIGRATE=FAIL.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description="Script to aggregate `bazelisk --migrate` test result for incompatible flags and generate pretty Buildkite info messages."
    )
    parser.add_argument("--build_number", type=str)
    parser.add_argument("--notify", type=bool, nargs="?", const=True)
    args = parser.parse_args(argv)
    try:
        if args.build_number:
            client = bazelci.BuildkiteClient(org=BUILDKITE_ORG, pipeline=PIPELINE)
            already_failing_jobs, failed_jobs_per_flag, details_per_flag = analyze_logs(
                args.build_number, client
            )
            # Filter out flags already flipped in the current Bazel version.
            failed_jobs_per_flag, details_per_flag = handle_already_flipped_flags(
                failed_jobs_per_flag, details_per_flag
            )
            migration_required = print_result_info(
                already_failing_jobs, failed_jobs_per_flag, details_per_flag
            )
            if migration_required and FAIL_IF_MIGRATION_REQUIRED:
                bazelci.eprint("Exiting with code 3 since a migration is required.")
                return 3
        else:
            parser.print_help()
            return 2
    except bazelci.BuildkiteException as e:
        bazelci.eprint(str(e))
        return 1
    return 0
if __name__ == "__main__":
    sys.exit(main())
| 35.336815 | 142 | 0.655682 |
ace011635cb11c002c711711f9e1152047cea909 | 5,583 | py | Python | benchmark/cf_policy_search/run_cf_policy_search.py | daturkel/zr-obp | c870a20c3bb6f3a7d1b42bf1ebed01a5f2239aa6 | [
"Apache-2.0"
] | null | null | null | benchmark/cf_policy_search/run_cf_policy_search.py | daturkel/zr-obp | c870a20c3bb6f3a7d1b42bf1ebed01a5f2239aa6 | [
"Apache-2.0"
] | null | null | null | benchmark/cf_policy_search/run_cf_policy_search.py | daturkel/zr-obp | c870a20c3bb6f3a7d1b42bf1ebed01a5f2239aa6 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
import yaml
import numpy as np
from pandas import DataFrame
from joblib import Parallel, delayed
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from custom_dataset import OBDWithInteractionFeatures
from obp.policy import IPWLearner
from obp.ope import InverseProbabilityWeighting
# hyperparameter for the regression model used in model dependent OPE estimators
with open("./conf/hyperparams.yaml", "rb") as f:
    hyperparams = yaml.safe_load(f)
# Map CLI base-model names to their scikit-learn classifier classes.
base_model_dict = dict(
    logistic_regression=LogisticRegression,
    lightgbm=HistGradientBoostingClassifier,
    random_forest=RandomForestClassifier,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="run evaluation policy selection.")
parser.add_argument(
"--n_runs",
type=int,
default=5,
help="number of bootstrap sampling in the experiment.",
)
parser.add_argument(
"--context_set",
type=str,
choices=["1", "2"],
required=True,
help="context sets for contextual bandit policies.",
)
parser.add_argument(
"--base_model",
type=str,
choices=["logistic_regression", "lightgbm", "random_forest"],
required=True,
help="base model for a evaluation policy to be evaluated",
)
parser.add_argument(
"--behavior_policy",
type=str,
choices=["bts", "random"],
default="random",
help="behavior policy, bts or random.",
)
parser.add_argument(
"--campaign",
type=str,
choices=["all", "men", "women"],
required=True,
help="campaign name, men, women, or all.",
)
parser.add_argument(
"--test_size",
type=float,
default=0.5,
help="the proportion of the dataset to include in the test split.",
)
parser.add_argument(
"--n_jobs",
type=int,
default=1,
help="the maximum number of concurrently running jobs.",
)
parser.add_argument("--random_state", type=int, default=12345)
args = parser.parse_args()
print(args)
# configurations
n_runs = args.n_runs
context_set = args.context_set
base_model = args.base_model
behavior_policy = args.behavior_policy
campaign = args.campaign
test_size = args.test_size
n_jobs = args.n_jobs
random_state = args.random_state
np.random.seed(random_state)
data_path = Path("../open_bandit_dataset")
# define a dataset class
obd = OBDWithInteractionFeatures(
behavior_policy=behavior_policy,
campaign=campaign,
data_path=data_path,
context_set=context_set,
)
# define a counterfactual policy based on IPWLearner
counterfactual_policy = IPWLearner(
base_model=base_model_dict[base_model](**hyperparams[base_model]),
n_actions=obd.n_actions,
len_list=obd.len_list,
)
policy_name = f"{base_model}_{context_set}"
# ground-truth policy value of the Bernoulli TS policy (the current best policy) in the test set
# , which is the empirical mean of the factual (observed) rewards (on-policy estimation)
ground_truth = obd.calc_on_policy_policy_value_estimate(
behavior_policy="bts",
campaign=campaign,
data_path=data_path,
test_size=test_size,
is_timeseries_split=True,
)
def process(b: int):
# sample bootstrap from batch logged bandit feedback
boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(
test_size=test_size, is_timeseries_split=True, random_state=b
)
# train an evaluation on the training set of the logged bandit feedback data
action_dist = counterfactual_policy.fit(
context=boot_bandit_feedback["context"],
action=boot_bandit_feedback["action"],
reward=boot_bandit_feedback["reward"],
pscore=boot_bandit_feedback["pscore"],
position=boot_bandit_feedback["position"],
)
# make action selections (predictions)
action_dist = counterfactual_policy.predict(
context=boot_bandit_feedback["context_test"]
)
# estimate the policy value of a given counterfactual algorithm by the three OPE estimators.
ipw = InverseProbabilityWeighting()
return ipw.estimate_policy_value(
reward=boot_bandit_feedback["reward_test"],
action=boot_bandit_feedback["action_test"],
position=boot_bandit_feedback["position_test"],
pscore=boot_bandit_feedback["pscore_test"],
action_dist=action_dist,
)
processed = Parallel(
backend="multiprocessing",
n_jobs=n_jobs,
verbose=50,
)([delayed(process)(i) for i in np.arange(n_runs)])
# save counterfactual policy evaluation results in `./logs` directory
ope_results = np.zeros((n_runs, 2))
for b, estimated_policy_value_b in enumerate(processed):
ope_results[b, 0] = estimated_policy_value_b
ope_results[b, 1] = estimated_policy_value_b / ground_truth
save_path = Path("./logs") / behavior_policy / campaign
save_path.mkdir(exist_ok=True, parents=True)
DataFrame(
ope_results, columns=["policy_value", "relative_policy_value"]
).describe().round(6).to_csv(save_path / f"{policy_name}.csv")
| 35.113208 | 100 | 0.676697 |
ace011baad9e3d63b12fa0b5f9102e7d9fb38617 | 1,751 | py | Python | configpp/evolution/revision.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | null | null | null | configpp/evolution/revision.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | 6 | 2018-09-15T09:14:12.000Z | 2019-07-10T11:40:36.000Z | configpp/evolution/revision.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | null | null | null | import os
import random
import re
from datetime import datetime
from slugify import slugify
# Number of hexadecimal digits in a generated revision identifier.
REVISION_NUMBER_LENGTH = 12

def gen_rev_number():
    """Return a random revision id: REVISION_NUMBER_LENGTH lowercase hex digits.

    The lower bound starts with a leading 1 so the formatted value can never
    be shorter than REVISION_NUMBER_LENGTH digits.
    """
    lower_bound = int('0x1' + '0' * (REVISION_NUMBER_LENGTH-1), 16)
    upper_bound = int('0x' + 'F' * REVISION_NUMBER_LENGTH, 16)
    return format(random.randrange(lower_bound, upper_bound), 'x')
class Revision():
    """A single evolution revision: its identity, message, creation date,
    parent link and the handler object that performs the actual
    upgrade/downgrade work.
    """

    # Matches revision script file names: '<REVISION_NUMBER_LENGTH hex digits>_<slug>.py'.
    FILENAME_PATTERN = re.compile(r'([a-f\d]{%s})_.+\.py' % REVISION_NUMBER_LENGTH)
    ORIGINAL_TEMPLATE_FILE_PATH = os.path.join(os.path.dirname(__file__), 'templates', 'script.py.tmpl')

    def __init__(self, message: str, id: str, date: datetime = None, parent_id: str = '', handler = None):
        self._id = id
        self._message = message
        self._parent_id = parent_id
        # Fall back to "now" when no explicit creation date is supplied.
        self._date = date if date else datetime.now()
        self._handler = handler

    @property
    def id(self):
        return self._id

    @property
    def date(self):
        return self._date

    @property
    def date_str(self):
        return self._date.strftime('%Y-%m-%d %H:%M:%S')

    @property
    def parent_id(self):
        return self._parent_id

    @property
    def filename(self):
        return f'{self._id}_{slugify(self._message)}.py'

    @property
    def message(self):
        return self._message

    def upgrade(self, *args):
        return self._handler.upgrade(*args)

    def downgrade(self, *args):
        return self._handler.downgrade(*args)

    def __eq__(self, other: 'Revision'):
        return (self.id, self.date_str, self.message, self.parent_id) == \
               (other.id, other.date_str, other.message, other.parent_id)

    def __repr__(self):
        return f"<Revision id: {self.id}, message: {self.message}, date: {self.date_str}, parent: {self.parent_id}>"
| 28.241935 | 142 | 0.641919 |
ace011f1daae5c2d589df0bcde01774bb87e0d4e | 5,811 | py | Python | map.blender.py | Eibx/Rebronx | 60c56781d61b58294f094c8f48edb5a4feeebe5c | [
"MIT"
] | null | null | null | map.blender.py | Eibx/Rebronx | 60c56781d61b58294f094c8f48edb5a4feeebe5c | [
"MIT"
] | 5 | 2021-03-09T21:04:07.000Z | 2022-02-26T19:16:43.000Z | map.blender.py | Eibx/Rebronx | 60c56781d61b58294f094c8f48edb5a4feeebe5c | [
"MIT"
] | null | null | null | import bpy
import io
import os
import math
# Seed shared by every randomized mesh operator so map generation is reproducible.
random_seed=1337
# Global registry of Step nodes; filled while walking the generated grid mesh.
steps = []
class Connection:
    """Directed edge in the step graph: target step id plus traversal cost."""

    def __init__(self, id, cost):
        self.id = id      # id of the connected Step
        self.cost = cost  # distance to that Step, used as pathfinding weight
class Step:
    """Node of the walkable map graph, placed at (x, y) on the generated grid."""

    def __init__(self, id, x, y):
        self.id = id
        self.x = x
        self.y = y
        # Populated later, while walking the mesh edges.
        self.connections = []
def get_step(vert):
    """Return the Step whose coordinates match the given mesh vertex.

    Returns None when nothing matches.  NOTE(review): this relies on exact
    float equality between vertex and step coordinates; it works here because
    both values originate from the same mesh, but confirm before reusing.
    """
    for candidate in steps:
        if candidate.x == vert.co.x and candidate.y == vert.co.y:
            return candidate
    return None
def get_distance(step1, step2):
    """Return the 2D euclidean distance between two steps.

    Uses math.hypot, which is numerically safer than sqrt(dx*dx + dy*dy)
    for large components and makes the original implementation's abs()
    unnecessary: a sum of squares can never be negative.
    """
    return math.hypot(step1.x - step2.x, step1.y - step2.y)
def grid_to_wireframe(thinkness):
    """Convert the active grid object into a flat wireframe mesh.

    `thinkness` (sic) is the wireframe modifier thickness. Operates on the
    currently active object and leaves the editor back in object mode.
    """
    # Apply wireframe modifier
    bpy.ops.object.modifier_add(type='WIREFRAME')
    bpy.context.object.modifiers["Wireframe"].thickness = thinkness
    bpy.context.object.modifiers["Wireframe"].use_boundary = True
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Wireframe")
    # Make wireframe flat, and remove doubles
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    # Scale Z to 0: flattens the wireframe onto the XY plane.
    bpy.ops.transform.resize(value=(1, 1, 0), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, False, True), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
    bpy.ops.mesh.remove_doubles()
    # Recalculate normals
    bpy.ops.mesh.normals_make_consistent(inside=False)
    # Switch to object mode
    bpy.ops.object.mode_set(mode="OBJECT")
def rename_selected(name):
    """Rename the currently active (just created or duplicated) object."""
    bpy.context.active_object.name = name
def select_object(name):
    """Deselect everything, then select and activate the object called `name`."""
    bpy.ops.object.select_all(action='DESELECT')
    obj = bpy.data.objects[name]
    obj.select_set(state=True)
    # Many bpy operators act on the *active* object, not just the selection.
    bpy.context.view_layer.objects.active = obj
def make_selected_fat(thinkness, center):
    """Extrude the active flat mesh upwards by `thinkness` (sic).

    When `center` is true the result is shifted down by half the extrusion so
    the solid straddles the XY plane instead of sitting on top of it.
    """
    bpy.ops.object.mode_set(mode="EDIT")
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.extrude_region_move(TRANSFORM_OT_translate = { "value": (0, 0, thinkness) })
    if center:
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.transform.translate(value=(0, 0, -(thinkness/2)))
    bpy.ops.object.mode_set(mode="OBJECT")
def carve_object(obj, carve):
    """Boolean-subtract object `carve` from object `obj`, then delete `carve`.

    Both arguments are object *names*.  NOTE(review): no `operation` is set
    on the boolean modifier, so Blender's default is assumed — confirm it is
    the intended DIFFERENCE for the targeted Blender version.
    """
    select_object(obj)
    bpy.ops.object.modifier_add(type='BOOLEAN')
    bpy.context.object.modifiers["Boolean"].object = bpy.data.objects[carve]
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
    # The carve helper object is no longer needed once the boolean is applied.
    select_object(carve)
    bpy.ops.object.delete(use_global=False, confirm=False)
# --- Scene construction (runs at import time) ------------------------------
# Rebuilds the Blender scene from scratch, generates the street grid, then
# exports the game map (data/map.json) and the geometry (city.glb).

# Clean up scene
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False, confirm=False)
# Create grid
bpy.ops.mesh.primitive_grid_add(x_subdivisions=10, y_subdivisions=10, size=20, enter_editmode=True, location=(0, 0, 0))
# Randomize verticies and make them flat
bpy.ops.transform.vertex_random(offset=0.8, seed=random_seed)
bpy.ops.transform.resize(value=(1, 1, 0), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, False, True), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
# Apply transformation for good measure
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
# Rename
rename_selected("grid")
# Dissolve random verts
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_random(percent=20, seed=random_seed)
bpy.ops.mesh.dissolve_verts()
# Add steps to array
bpy.ops.object.mode_set(mode="OBJECT")
obj = bpy.context.active_object
for i, vert in enumerate(obj.data.vertices):
    steps.append(Step(i+1, vert.co.x, vert.co.y))
# Every polygon edge becomes a bidirectional connection whose cost is the
# euclidean distance between its endpoints.
mesh = obj.data
for poly in mesh.polygons:
    for index in poly.loop_indices:
        edge = mesh.edges[mesh.loops[index].edge_index]
        step1 = get_step(mesh.vertices[edge.vertices[0]])
        step2 = get_step(mesh.vertices[edge.vertices[1]])
        dist = get_distance(step1, step2)
        step1.connections.append(Connection(step2.id, dist))
        step2.connections.append(Connection(step1.id, dist))
# Select main object again
select_object("grid")
# Duplicate grid
bpy.ops.object.duplicate()
rename_selected("grid_carve")
# Duplicate grid
bpy.ops.object.duplicate()
rename_selected("roads")
# Duplicate grid
bpy.ops.object.duplicate()
rename_selected("pavement")
# Building space
select_object("grid_carve")
grid_to_wireframe(1)
make_selected_fat(2, True)
carve_object("grid", "grid_carve")
# Pavements
select_object("pavement")
grid_to_wireframe(0.5)
make_selected_fat(0.05, False)
# Roads
select_object("roads")
grid_to_wireframe(0.2)
# Cut pavement
select_object("roads")
bpy.ops.object.duplicate()
rename_selected("roads_carve")
make_selected_fat(2, True)
carve_object("pavement", "roads_carve")
# Write map.json file
with open(os.path.join(os.getcwd(), "data/map.json"), "w") as file:
    file.write('{\n "nodes": [\n')
    for i, step in enumerate(steps):
        # The final entry must not carry a trailing comma (strict JSON).
        next_char = ','
        if i == len(steps)-1:
            next_char = ''
        # NOTE(review): every 4th node is emitted with "step": false —
        # presumably a non-walkable node; confirm the flag's intended meaning.
        is_step = "true"
        if i % 4 == 0:
            is_step = "false"
        connection_string = ''
        for conn in step.connections:
            connection_string += '{{ "id": {}, "cost": {} }},'.format(conn.id, conn.cost);
        file.write(' {{ "id": {}, "x": {}, "y": {}, "connections": [{}], "step": {} }}{}\n'.format(step.id, step.x, step.y, connection_string[:-1], is_step, next_char))
    file.write(" ]\n}")
bpy.ops.export_scene.gltf(filepath=os.path.join(os.getcwd(), "client/public/assets", "city.glb"))
ace013067fa806c70a616fb1b96d2d8d3ff7fcdb | 7,009 | py | Python | mayan/apps/linking/apps.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 336 | 2019-05-09T07:05:19.000Z | 2022-03-25T09:50:22.000Z | mayan/apps/linking/apps.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 9 | 2019-10-29T00:12:27.000Z | 2021-09-09T15:16:51.000Z | mayan/apps/linking/apps.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.acls.permissions import (
permission_acl_edit, permission_acl_view
)
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.classes import ModelCopy
from mayan.apps.common.menus import (
menu_list_facet, menu_object, menu_related, menu_secondary, menu_setup
)
from mayan.apps.documents.links.document_type_links import link_document_type_list
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from mayan.apps.views.html_widgets import TwoStateWidget
from .events import event_smart_link_edited
from .links import (
link_document_type_smart_links, link_smart_link_create,
link_smart_link_condition_create, link_smart_link_condition_delete,
link_smart_link_condition_edit, link_smart_link_condition_list,
link_smart_link_delete, link_smart_link_document_types,
link_smart_link_edit, link_smart_link_instance_view,
link_smart_link_instances_for_document, link_smart_link_list,
link_smart_link_setup
)
from .permissions import (
permission_resolved_smart_link_view, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
class LinkingApp(MayanAppConfig):
    """Django app configuration for the smart-link (document linking) app.

    `ready()` wires the app into the rest of Mayan EDMS: it registers
    events, permissions, copy support, list columns and menu links for the
    SmartLink, SmartLinkCondition and ResolvedSmartLink models.
    """
    app_namespace = 'linking'
    app_url = 'smart_links'
    has_rest_api = True
    has_tests = True
    name = 'mayan.apps.linking'
    verbose_name = _('Linking')

    def ready(self):
        """Perform all runtime registrations once the app registry is loaded."""
        super().ready()
        # Models are resolved lazily: they may not be importable at module
        # load time.
        Document = apps.get_model(
            app_label='documents', model_name='Document'
        )
        DocumentType = apps.get_model(
            app_label='documents', model_name='DocumentType'
        )

        ResolvedSmartLink = self.get_model(model_name='ResolvedSmartLink')
        SmartLink = self.get_model(model_name='SmartLink')
        SmartLinkCondition = self.get_model(model_name='SmartLinkCondition')

        EventModelRegistry.register(model=SmartLink)
        EventModelRegistry.register(model=SmartLinkCondition)

        # Declare which fields survive an object copy operation.
        ModelCopy(
            model=SmartLinkCondition
        ).add_fields(
            field_names=(
                'enabled', 'expression', 'foreign_document_data',
                'inclusion', 'negated', 'operator', 'smart_link'
            )
        )
        ModelCopy(
            model=SmartLink, bind_link=True, register_permission=True
        ).add_fields(
            field_names=(
                'conditions', 'dynamic_label', 'document_types', 'enabled',
                'label'
            )
        )

        ModelEventType.register(
            event_types=(event_smart_link_edited,), model=SmartLink
        )

        ModelPermission.register(
            model=Document, permissions=(
                permission_resolved_smart_link_view,
            )
        )
        ModelPermission.register(
            model=SmartLink, permissions=(
                permission_acl_edit, permission_acl_view,
                permission_resolved_smart_link_view,
                permission_smart_link_delete, permission_smart_link_edit,
                permission_smart_link_view
            )
        )
        # Conditions inherit their ACLs from the owning smart link.
        ModelPermission.register_inheritance(
            model=SmartLinkCondition, related='smart_link',
        )

        SourceColumn(
            func=lambda context: context['object'].get_label_for(
                document=context['document']
            ), is_identifier=True, label=_('Label'),
            source=ResolvedSmartLink
        )
        source_column_smart_link_label = SourceColumn(
            attribute='label', is_identifier=True, is_sortable=True,
            source=SmartLink
        )
        source_column_smart_link_label.add_exclude(source=ResolvedSmartLink)
        source_column_smart_link_dynamic_label = SourceColumn(
            attribute='dynamic_label', include_label=True, is_sortable=True,
            source=SmartLink
        )
        source_column_smart_link_dynamic_label.add_exclude(
            source=ResolvedSmartLink
        )
        source_column_smart_link_enabled = SourceColumn(
            attribute='enabled', include_label=True, is_sortable=True,
            source=SmartLink, widget=TwoStateWidget
        )
        source_column_smart_link_enabled.add_exclude(
            source=ResolvedSmartLink
        )
        SourceColumn(
            attribute='get_full_label', is_identifier=True,
            source=SmartLinkCondition
        )
        SourceColumn(
            attribute='enabled', include_label=True, is_sortable=True,
            source=SmartLinkCondition, widget=TwoStateWidget
        )

        # Document
        menu_list_facet.bind_links(
            links=(link_smart_link_instances_for_document,),
            sources=(Document,)
        )

        # Document type
        menu_list_facet.bind_links(
            links=(link_document_type_smart_links,), sources=(DocumentType,)
        )
        menu_related.bind_links(
            links=(link_smart_link_list,),
            sources=(
                DocumentType, 'documents:document_type_list',
                'documents:document_type_create'
            )
        )

        # Resolved smart link
        menu_object.bind_links(
            links=(link_smart_link_instance_view,),
            sources=(ResolvedSmartLink,)
        )

        # Smart link
        menu_list_facet.bind_links(
            exclude=(ResolvedSmartLink,),
            links=(
                link_smart_link_document_types, link_smart_link_condition_list
            ), sources=(SmartLink,)
        )
        menu_object.bind_links(
            exclude=(ResolvedSmartLink,),
            links=(
                link_smart_link_delete, link_smart_link_edit
            ), sources=(SmartLink,)
        )
        menu_related.bind_links(
            links=(link_document_type_list,),
            sources=(
                SmartLink, 'linking:smart_link_list',
                'linking:smart_link_create'
            )
        )
        menu_secondary.bind_links(
            links=(link_smart_link_list, link_smart_link_create),
            sources=(
                SmartLink, 'linking:smart_link_list',
                'linking:smart_link_create'
            )
        )

        # Smart link condition
        menu_object.bind_links(
            links=(
                link_smart_link_condition_edit,
                link_smart_link_condition_delete
            ), sources=(SmartLinkCondition,)
        )
        menu_secondary.bind_links(
            links=(link_smart_link_condition_create,),
            sources=(
                'linking:smart_link_condition_list',
                'linking:smart_link_condition_create',
                'linking:smart_link_condition_edit',
                'linking:smart_link_condition_delete'
            )
        )

        # Setup
        menu_setup.bind_links(links=(link_smart_link_setup,))
| 32.752336 | 82 | 0.640605 |
ace0171c0402d9724fa9f8de58dcb3ac5a8388ea | 15,266 | py | Python | tools/wpt/testfiles.py | Ms2ger/web-platform-tests | 645c0e8a5c028a613e7ad1732834100dbe946fc7 | [
"BSD-3-Clause"
] | 1 | 2022-03-19T09:43:35.000Z | 2022-03-19T09:43:35.000Z | tools/wpt/testfiles.py | Ms2ger/web-platform-tests | 645c0e8a5c028a613e7ad1732834100dbe946fc7 | [
"BSD-3-Clause"
] | null | null | null | tools/wpt/testfiles.py | Ms2ger/web-platform-tests | 645c0e8a5c028a613e7ad1732834100dbe946fc7 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import logging
import os
import re
import subprocess
import sys
from collections import OrderedDict
from six import iteritems
from ..manifest import manifest, update
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
logger = logging.getLogger()
def get_git_cmd(repo_path):
    """Create a function for invoking git commands as a subprocess."""
    def git(cmd, *args):
        arg_strs = [item.decode("utf8") if isinstance(item, bytes) else item
                    for item in args]
        full_cmd = ["git", cmd] + arg_strs
        try:
            logger.debug(" ".join(full_cmd))
            output = subprocess.check_output(full_cmd, cwd=repo_path,
                                             stderr=subprocess.STDOUT)
            return output.decode("utf8").strip()
        except subprocess.CalledProcessError as e:
            # NOTE(review): e.output is bytes under Python 3; it is logged
            # as-is here, matching the existing behaviour.
            logger.error("Git command exited with status %i" % e.returncode)
            logger.error(e.output)
            sys.exit(1)
    return git
def display_branch_point():
    """CLI entry point: print the branch point commit to stdout."""
    print(branch_point())
def branch_point():
    """Return the commit at which the current branch diverged from master.

    Resolution strategy:
    * Travis master build: HEAD itself;
    * Travis PR build: merge base of HEAD and the PR's target branch;
    * otherwise: walk the commits unique to the current branch and take the
      first parent that belongs to another branch, falling back to the merge
      base with origin/master when that heuristic is unreliable.
    """
    git = get_git_cmd(wpt_root)
    if (os.environ.get("TRAVIS_PULL_REQUEST", "false") == "false" and
        os.environ.get("TRAVIS_BRANCH") == "master"):
        # For builds on the master branch just return the HEAD commit
        return git("rev-parse", "HEAD")
    elif os.environ.get("TRAVIS_PULL_REQUEST", "false") != "false":
        # This is a PR, so the base branch is in TRAVIS_BRANCH
        travis_branch = os.environ.get("TRAVIS_BRANCH")
        assert travis_branch, "TRAVIS_BRANCH environment variable is defined"
        branch_point = git("merge-base", "HEAD", travis_branch)
    else:
        # Otherwise we aren't on a PR, so we try to find commits that are only in the
        # current branch c.f.
        # http://stackoverflow.com/questions/13460152/find-first-ancestor-commit-in-another-branch
        # parse HEAD into an object ref
        head = git("rev-parse", "HEAD")
        # get everything in refs/heads and refs/remotes that doesn't include HEAD
        not_heads = [item for item in git("rev-parse", "--not", "--branches", "--remotes").split("\n")
                     if item != "^%s" % head]
        # get all commits on HEAD but not reachable from anything in not_heads
        commits = git("rev-list", "--topo-order", "--parents", "HEAD", *not_heads)
        commit_parents = OrderedDict()
        if commits:
            for line in commits.split("\n"):
                line_commits = line.split(" ")
                commit_parents[line_commits[0]] = line_commits[1:]
        branch_point = None
        # if there are any commits, take the first parent that is not in commits
        for commit, parents in iteritems(commit_parents):
            for parent in parents:
                if parent not in commit_parents:
                    branch_point = parent
                    break
            if branch_point:
                break
        # if we had any commits, we should now have a branch point
        assert branch_point or not commit_parents
        # The above heuristic will fail in the following cases:
        #
        # - The current branch has fallen behind the remote version
        # - Changes on the current branch were rebased and therefore do not exist on any
        #   other branch. This will result in the selection of a commit that is earlier
        #   in the history than desired (as determined by calculating the later of the
        #   branch point and the merge base)
        #
        # In either case, fall back to using the merge base as the branch point.
        merge_base = git("merge-base", "HEAD", "origin/master")
        if (branch_point is None or
            (branch_point != merge_base and
             not git("log", "--oneline", "%s..%s" % (merge_base, branch_point)).strip())):
            logger.debug("Using merge-base as the branch point")
            branch_point = merge_base
        else:
            logger.debug("Using first commit on another branch as the branch point")
    logger.debug("Branch point from master: %s" % branch_point)
    return branch_point
def compile_ignore_rule(rule):
    """Compile an ignore rule into an anchored regular expression.

    Rules are /-separated paths in which a component ending in ``*`` matches
    anything except a path separator and one ending in ``**`` matches
    anything, separators included.
    """
    normalized = rule.replace(os.path.sep, "/")
    translated = []
    for component in normalized.split("/"):
        if component.endswith("**"):
            translated.append(re.escape(component[:-2]) + ".*")
        elif component.endswith("*"):
            translated.append(re.escape(component[:-1]) + "[^/]*")
        else:
            translated.append(re.escape(component))
    return re.compile("^%s$" % "/".join(translated))
def repo_files_changed(revish, include_uncommitted=False, include_new=False):
    """Return the set of repo-relative paths changed in the range `revish`.

    :param include_uncommitted: also include tracked files with staged or
        unstaged modifications.
    :param include_new: also include untracked ("??") files; untracked
        directories are expanded to the individual files they contain.
    """
    git = get_git_cmd(wpt_root)
    # -z terminates entries with NUL, so the final list element is empty.
    files = git("diff", "--name-only", "-z", revish).split("\0")
    assert not files[-1]
    files = set(files[:-1])

    if include_uncommitted:
        entries = git("status", "-z").split("\0")
        assert not entries[-1]
        entries = entries[:-1]
        for item in entries:
            # NOTE(review): this assumes the entry splits cleanly into a
            # status code and a path; `git status -z` rename entries and
            # paths containing spaces would break this — verify inputs
            # before reusing this function elsewhere.
            status, path = item.split()
            if status == "??" and not include_new:
                continue
            else:
                if not os.path.isdir(path):
                    files.add(path)
                else:
                    # Untracked directories are reported as one entry;
                    # expand to the files they contain.
                    for dirpath, dirnames, filenames in os.walk(path):
                        for filename in filenames:
                            files.add(os.path.join(dirpath, filename))
    return files
def exclude_ignored(files, ignore_rules):
    """Partition `files` into (changed, ignored) according to `ignore_rules`.

    Returns two lists of absolute paths under wpt_root, in sorted order of
    the input paths.  `ignore_rules` may be None, meaning nothing is ignored.
    """
    compiled_rules = [compile_ignore_rule(item) for item in (ignore_rules or [])]
    changed = []
    ignored = []
    for item in sorted(files):
        fullpath = os.path.join(wpt_root, item)
        rule_path = item.replace(os.path.sep, "/")
        if any(rule.match(rule_path) for rule in compiled_rules):
            ignored.append(fullpath)
        else:
            changed.append(fullpath)
    return changed, ignored
def files_changed(revish, ignore_rules=None, include_uncommitted=False, include_new=False):
    """Get and return files changed since current branch diverged from master,
    excluding those that are located within any path matched by
    `ignore_rules`."""
    # Thin wrapper: collect the raw paths from git, then partition them into
    # (changed, ignored) via exclude_ignored().
    files = repo_files_changed(revish,
                               include_uncommitted=include_uncommitted,
                               include_new=include_new)
    if not files:
        return [], []

    return exclude_ignored(files, ignore_rules)
def _in_repo_root(full_path):
    """Return True when `full_path` lives directly in the repository root."""
    rel_path = os.path.relpath(full_path, wpt_root)
    return len(rel_path.split(os.sep)) < 2
def _init_manifest_cache():
    """Build a single-entry cache around manifest loading.

    Returns a loader function; repeated calls with the same path reuse the
    parsed manifest, while a different path evicts the previous entry.
    """
    c = {}

    def load(manifest_path=None):
        if manifest_path is None:
            manifest_path = os.path.join(wpt_root, "MANIFEST.json")
        if c.get(manifest_path):
            return c[manifest_path]

        # cache at most one path:manifest
        c.clear()
        wpt_manifest = manifest.load(wpt_root, manifest_path)
        if wpt_manifest is None:
            wpt_manifest = manifest.Manifest()
        # Bring the manifest up to date with the working tree before caching.
        update.update(wpt_root, wpt_manifest)
        c[manifest_path] = wpt_manifest
        return c[manifest_path]
    return load

# Module-level singleton loader shared by all callers in this file.
load_manifest = _init_manifest_cache()
def affected_testfiles(files_changed, skip_tests, manifest_path=None):
    """Determine and return list of test files that reference changed files.

    :param files_changed: iterable of absolute paths of modified files.
    :param skip_tests: top-level directory names that never contain tests.
    :param manifest_path: optional path to MANIFEST.json.
    :returns: tuple ``(tests_changed, affected_testfiles)`` of sets of
        absolute test file paths.
    """
    affected_testfiles = set()
    # Exclude files that are in the repo root, because
    # they are not part of any test.
    files_changed = [f for f in files_changed if not _in_repo_root(f)]
    nontests_changed = set(files_changed)
    wpt_manifest = load_manifest(manifest_path)

    test_types = ["testharness", "reftest", "wdspec"]
    support_files = {os.path.join(wpt_root, path)
                     for _, path, _ in wpt_manifest.itertypes("support")}
    wdspec_test_files = {os.path.join(wpt_root, path)
                         for _, path, _ in wpt_manifest.itertypes("wdspec")}
    test_files = {os.path.join(wpt_root, path)
                  for _, path, _ in wpt_manifest.itertypes(*test_types)}

    interface_dir = os.path.join(wpt_root, 'interfaces')
    interfaces_files = {os.path.join(wpt_root, 'interfaces', filename)
                        for filename in os.listdir(interface_dir)}

    interfaces_changed = interfaces_files.intersection(nontests_changed)
    nontests_changed = nontests_changed.intersection(support_files)

    tests_changed = set(item for item in files_changed if item in test_files)

    nontest_changed_paths = set()
    rewrites = {"/resources/webidl2/lib/webidl2.js": "/resources/WebIDLParser.js"}
    for full_path in nontests_changed:
        rel_path = os.path.relpath(full_path, wpt_root)
        path_components = rel_path.split(os.sep)
        top_level_subdir = path_components[0]
        if top_level_subdir in skip_tests:
            continue
        repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/")
        if repo_path in rewrites:
            repo_path = rewrites[repo_path]
            full_path = os.path.join(wpt_root, repo_path[1:].replace("/", os.path.sep))
        nontest_changed_paths.add((full_path, repo_path))

    # BUGFIX: this used to be a `map()` object. Under Python 3 calling len()
    # on it raises TypeError and, being a single-use iterator, it would be
    # exhausted after the first file scanned. Materialize it once as a list.
    interface_name = lambda x: os.path.splitext(os.path.basename(x))[0]
    interfaces_changed_names = [interface_name(item) for item in interfaces_changed]

    def affected_by_wdspec(test):
        # A wdspec test is affected when a changed support file lives in an
        # ancestor directory of the test (a trailing "support" directory
        # counts as its parent).
        affected = False
        if test in wdspec_test_files:
            for support_full_path, _ in nontest_changed_paths:
                # parent of support file or of "support" directory
                parent = os.path.dirname(support_full_path)
                if os.path.basename(parent) == "support":
                    parent = os.path.dirname(parent)
                relpath = os.path.relpath(test, parent)
                if not relpath.startswith(os.pardir):
                    # testfile is in subtree of support file
                    affected = True
                    break
        return affected

    def affected_by_interfaces(file_contents):
        # True when the test uses idlharness.js and mentions any changed
        # interface name. (The original mutated `affected_testfiles` as a
        # side effect and returned None; returning a bool and letting the
        # caller record the path is equivalent and clearer.)
        if interfaces_changed_names and 'idlharness.js' in file_contents:
            for interface in interfaces_changed_names:
                regex = '[\'"]' + interface + '(\\.idl)?[\'"]'
                if re.search(regex, file_contents):
                    return True
        return False

    for root, dirs, fnames in os.walk(wpt_root):
        # Walk top_level_subdir looking for test files containing either the
        # relative filepath or absolute filepath to the changed files.
        if root == wpt_root:
            # NOTE(review): assumes every entry of skip_tests exists at the
            # repo root — list.remove raises ValueError otherwise.
            for dir_name in skip_tests:
                dirs.remove(dir_name)
        for fname in fnames:
            test_full_path = os.path.join(root, fname)
            # Skip any file that's not a test file.
            if test_full_path not in test_files:
                continue
            if affected_by_wdspec(test_full_path):
                affected_testfiles.add(test_full_path)
                continue

            with open(test_full_path, "rb") as fh:
                raw_contents = fh.read()
            # BUGFIX: the BOM checks compared bytes against text literals,
            # which raises TypeError under Python 3; compare against bytes.
            if raw_contents.startswith(b"\xfe\xff"):
                file_contents = raw_contents.decode("utf-16be", "replace")
            elif raw_contents.startswith(b"\xff\xfe"):
                file_contents = raw_contents.decode("utf-16le", "replace")
            else:
                file_contents = raw_contents.decode("utf8", "replace")
            for full_path, repo_path in nontest_changed_paths:
                rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/")
                if rel_path in file_contents or repo_path in file_contents or affected_by_interfaces(file_contents):
                    affected_testfiles.add(test_full_path)
                    continue
    return tests_changed, affected_testfiles
def get_parser():
    """Build the argument parser for the files-changed command."""
    parser = argparse.ArgumentParser()
    parser.add_argument("revish", default=None, help="Commits to consider. Defaults to the "
                        "commits on the current branch", nargs="?")
    # BUGFIX: this option previously used `type=set`, which argparse applies
    # to each individual token, turning every rule string into a set of its
    # characters (breaking compile_ignore_rule downstream). Values are now
    # kept as strings; the default stays a set, which callers only iterate.
    parser.add_argument("--ignore-rules", nargs="*",
                        default=set(["resources/testharness*"]),
                        help="Rules for paths to exclude from lists of changes. Rules are paths "
                        "relative to the test root, with * before a separator or the end matching "
                        "anything other than a path separator and ** in that position matching "
                        "anything")
    parser.add_argument("--modified", action="store_true",
                        help="Include files under version control that have been "
                        "modified or staged")
    parser.add_argument("--new", action="store_true",
                        help="Include files in the worktree that are not in version control")
    parser.add_argument("--show-type", action="store_true",
                        help="Print the test type along with each affected test")
    parser.add_argument("--null", action="store_true",
                        help="Separate items with a null byte")
    return parser
def get_parser_affected():
    """Parser for the tests-affected command: base options plus --metadata."""
    parser = get_parser()
    parser.add_argument(
        "--metadata",
        dest="metadata_root",
        action="store",
        default=wpt_root,
        help="Directory that will contain MANIFEST.json")
    return parser
def get_revish(**kwargs):
    """Return the revision range to inspect, defaulting to branch_point()..HEAD."""
    revish = kwargs["revish"]
    if revish is None:
        revish = "%s..HEAD" % branch_point()
    return revish
def run_changed_files(**kwargs):
    """Entry point: print each changed file (repo-relative), one per separator."""
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_rules"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    sep = "\0" if kwargs["null"] else "\n"
    for path in sorted(changed):
        sys.stdout.write(os.path.relpath(path, wpt_root) + sep)
def run_tests_affected(**kwargs):
    """Entry point: print every test affected by the changes in the range."""
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_rules"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    manifest_path = os.path.join(kwargs["metadata_root"], "MANIFEST.json")
    # Infrastructure directories are never treated as affected tests.
    tests_changed, dependents = affected_testfiles(
        changed,
        set(["conformance-checkers", "docs", "tools"]),
        manifest_path=manifest_path
    )

    message = "{path}"
    if kwargs["show_type"]:
        wpt_manifest = load_manifest(manifest_path)
        message = "{path}\t{item_type}"

    message += "\0" if kwargs["null"] else "\n"

    for item in sorted(tests_changed | dependents):
        results = {
            "path": os.path.relpath(item, wpt_root)
        }
        if kwargs["show_type"]:
            item_types = {i.item_type for i in wpt_manifest.iterpath(results["path"])}
            # A path mapping to several item types is reported as a single
            # space-joined entry.
            if len(item_types) != 1:
                item_types = [" ".join(item_types)]
            results["item_type"] = item_types.pop()
        sys.stdout.write(message.format(**results))
| 39.755208 | 120 | 0.608869 |
ace0180b63a5a5229afe01bc7dc9b1ddef4beed5 | 5,704 | py | Python | us-weather-history/wunderground_parser.py | MykolaHerasymovych/data | 328eda6f1ca6c8d88254171202bedb38670f71d0 | [
"MIT"
] | 1 | 2019-10-06T13:50:06.000Z | 2019-10-06T13:50:06.000Z | us-weather-history/wunderground_parser.py | MykolaHerasymovych/data | 328eda6f1ca6c8d88254171202bedb38670f71d0 | [
"MIT"
] | null | null | null | us-weather-history/wunderground_parser.py | MykolaHerasymovych/data | 328eda6f1ca6c8d88254171202bedb38670f71d0 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from urllib.request import urlopen
def parse_pages(station):
    '''
    This function parses the web pages downloaded from wunderground.com
    into a flat CSV file for the station you provide it.

    Make sure to run the wunderground scraper first so you have the web
    pages downloaded.
    '''
    # Scrape between July 1, 2014 and July 1, 2015
    # You can change the dates here if you prefer to parse a different range
    current_date = datetime(year=2014, month=7, day=1)
    end_date = datetime(year=2015, month=7, day=1)

    with open('{}.csv'.format(station), 'w') as out_file:
        out_file.write('date,actual_mean_temp,actual_min_temp,actual_max_temp,'
                       'average_min_temp,average_max_temp,'
                       'record_min_temp,record_max_temp,'
                       'record_min_temp_year,record_max_temp_year,'
                       'actual_precipitation,average_precipitation,'
                       'record_precipitation\n')

        while current_date != end_date:
            try_again = False
            with open('{}/{}-{}-{}.html'.format(station,
                                                current_date.year,
                                                current_date.month,
                                                current_date.day)) as in_file:
                soup = BeautifulSoup(in_file.read(), 'html.parser')
                weather_data = soup.find(id='historyTable').find_all('span', class_='wx-value')
                weather_data_units = soup.find(id='historyTable').find_all('td')
                try:
                    actual_mean_temp = weather_data[0].text
                    actual_max_temp = weather_data[2].text
                    average_max_temp = weather_data[3].text
                    record_max_temp = weather_data[4].text
                    actual_min_temp = weather_data[5].text
                    average_min_temp = weather_data[6].text
                    record_min_temp = weather_data[7].text
                    record_max_temp_year = weather_data_units[
                        9].text.split('(')[-1].strip(')')
                    record_min_temp_year = weather_data_units[
                        13].text.split('(')[-1].strip(')')

                    actual_precipitation = weather_data[9].text
                    if actual_precipitation == 'T':
                        # 'T' means "trace" precipitation; record it as zero.
                        actual_precipitation = '0.0'
                    average_precipitation = weather_data[10].text
                    record_precipitation = weather_data[11].text

                    # Verify that the parsed data is valid
                    if (record_max_temp_year == '-1' or record_min_temp_year == '-1' or
                            int(record_max_temp) < max(int(actual_max_temp), int(average_max_temp)) or
                            int(record_min_temp) > min(int(actual_min_temp), int(average_min_temp)) or
                            float(actual_precipitation) > float(record_precipitation) or
                            float(average_precipitation) > float(record_precipitation)):
                        raise Exception

                    out_file.write('{}-{}-{},'.format(current_date.year, current_date.month, current_date.day))
                    out_file.write(','.join([actual_mean_temp, actual_min_temp, actual_max_temp,
                                             average_min_temp, average_max_temp,
                                             record_min_temp, record_max_temp,
                                             record_min_temp_year, record_max_temp_year,
                                             actual_precipitation, average_precipitation,
                                             record_precipitation]))
                    out_file.write('\n')
                    current_date += timedelta(days=1)
                # BUGFIX: this was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit, making the retry loop
                # impossible to stop with Ctrl-C. `except Exception` keeps
                # the intended "re-download on any parse problem" behaviour.
                except Exception:
                    # If the web page is formatted improperly, signal that the page may need
                    # to be downloaded again.
                    try_again = True

            # If the web page needs to be downloaded again, re-download it from
            # wunderground.com
            # If the parser gets stuck on a certain date, you may need to investigate
            # the page to find out what is going on. Sometimes data is missing, in
            # which case the parser will get stuck. You can manually put in the data
            # yourself in that case, or just tell the parser to skip this day.
            if try_again:
                print('Error with date {}'.format(current_date))

                lookup_URL = 'http://www.wunderground.com/history/airport/{}/{}/{}/{}/DailyHistory.html'
                formatted_lookup_URL = lookup_URL.format(station,
                                                         current_date.year,
                                                         current_date.month,
                                                         current_date.day)
                html = urlopen(formatted_lookup_URL).read().decode('utf-8')

                out_file_name = '{}/{}-{}-{}.html'.format(station,
                                                          current_date.year,
                                                          current_date.month,
                                                          current_date.day)

                # BUGFIX: the HTML used to be written via
                # `with open(...) as out_file:`, which rebound the name of
                # the open CSV handle; after the first re-download every
                # later CSV write hit a closed file. Use a distinct name.
                with open(out_file_name, 'w') as html_file:
                    html_file.write(html)
# Parse the stations used in this article
for station in ['KCLT', 'KCQT', 'KHOU', 'KIND', 'KJAX',
                'KMDW', 'KNYC', 'KPHL', 'KPHX', 'KSEA']:
    # BUGFIX: this previously called parse_station(), a name that is not
    # defined anywhere in this module and raised NameError; the parser
    # defined above is parse_pages().
    parse_pages(station)
| 51.854545 | 111 | 0.521564 |
ace0184b27c49f106ce8ce8caadc7477fc504255 | 12,786 | py | Python | tests/functional_tests.py | fountainment/fontbm | 03696da3012af0a9a459243ed6d2cca4846919b2 | [
"MIT"
] | 148 | 2018-01-22T08:59:11.000Z | 2022-03-29T05:12:49.000Z | tests/functional_tests.py | fountainment/fontbm | 03696da3012af0a9a459243ed6d2cca4846919b2 | [
"MIT"
] | 18 | 2017-09-21T15:41:26.000Z | 2022-03-10T12:08:39.000Z | tests/functional_tests.py | fountainment/fontbm | 03696da3012af0a9a459243ed6d2cca4846919b2 | [
"MIT"
] | 21 | 2017-12-12T20:01:08.000Z | 2022-02-17T20:56:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import subprocess
import difflib
import os.path
import filecmp
import shutil
import shlex
import xml.etree.ElementTree as ET
import json
import struct
# from collections.abc import Iterable
from collections import abc
def diff(expected, generated):
    """Print a unified diff of *expected* vs *generated*; return True if they differ."""
    with open(expected, 'r') as expected_file:
        with open(generated, 'r') as generated_file:
            # n=0: no context lines, only the differing lines themselves.
            delta = list(difflib.unified_diff(
                expected_file.readlines(),
                generated_file.readlines(),
                fromfile=expected,
                tofile=generated,
                n=0
            ))
    for chunk in delta:
        sys.stdout.write(chunk)
    return bool(delta)
def check_diff(expected, generated, binary=False):
    """Raise RuntimeError if *generated* does not match *expected*.

    Text files are compared with :func:`diff` (which also prints the delta);
    binary files are compared byte-for-byte with :func:`filecmp.cmp`.
    """
    assert os.path.isfile(expected)
    assert os.path.isfile(generated)
    if binary:
        different = not filecmp.cmp(expected, generated)
    else:
        different = diff(expected, generated)
    if different:
        raise RuntimeError('generated data is not equal for expected ' + generated)
def clear_work_dir():
    """Reset the ``generated`` output directory (in the CWD) to an empty state."""
    work_dir = 'generated'
    # ignore_errors: the directory may not exist on the first run.
    shutil.rmtree(work_dir, ignore_errors=True)
    os.makedirs(work_dir)
def test_expected(font_exe, env):
    """Generate fonts with three option sets and diff each result against expected/."""
    clear_work_dir()
    # (argv, expected file, generated file, compare-as-binary)
    cases = [
        ([font_exe, '--font-file', 'fonts/FreeSans.ttf', '--chars', '32-126',
          '--padding-up', '8', '--padding-right', '7', '--padding-down', '6', '--padding-left', '5',
          '--output', 'generated/test0', '--include-kerning-pairs'],
         'expected/test0.fnt', 'generated/test0.fnt', False),
        ([font_exe, '--font-file', 'fonts/FreeSans.ttf', '--chars', '32-126',
          '--output', 'generated/test1', '--include-kerning-pairs',
          '--padding-up', '8', '--padding-right', '7',
          '--data-format', 'xml'],
         'expected/test1.fnt', 'generated/test1.fnt', False),
        ([font_exe, '--font-file', 'fonts/FreeSans.ttf', '--chars', '32-126',
          '--output', 'generated/test2', '--include-kerning-pairs',
          '--spacing-vert', '4', '--spacing-horiz', '5',
          '--data-format', 'bin'],
         'expected/test2.fnt', 'generated/test2.fnt', True),
    ]
    for args, expected, generated, binary in cases:
        subprocess.run(args, check=True, env=env)
        check_diff(expected, generated, binary)
def test_too_many_textures(font_exe, env):
    """The tool must fail cleanly when the glyphs exceed --max-texture-count pages."""
    args = [font_exe, '--font-file', 'fonts/FreeSans.ttf', '--chars', '32-126',
            '--output', 'generated/test3', '--include-kerning-pairs',
            '--texture-width', '128', '--texture-height', '128',
            '--max-texture-count', '1']
    process = subprocess.Popen(args,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               env=env, text=True)
    out, err = process.communicate()
    # Expect nothing on stdout, the specific message on stderr, exit code 1.
    assert out == ''
    assert err.strip() == 'too many generated textures (more than --max-texture-count)'
    assert process.returncode == 1
def read_xml(path):
    """Parse a BMFont XML descriptor into a plain dict.

    The result has keys ``info``, ``common``, ``pages``, ``chars`` and
    ``kernings``; every attribute value is converted to int except the
    string-valued ``face``/``charset`` and the comma-separated
    ``padding``/``spacing`` lists.
    """
    tree = ET.parse(path)

    info_tag = tree.find('info')
    info = {k: int(v) for (k, v) in info_tag.attrib.items()
            if k not in ['face', 'charset', 'padding', 'spacing']}
    info['face'] = info_tag.attrib['face']
    info['charset'] = info_tag.attrib['charset']
    info['padding'] = [int(n) for n in info_tag.attrib['padding'].split(',')]
    info['spacing'] = [int(n) for n in info_tag.attrib['spacing'].split(',')]

    return {
        'info': info,
        'common': {k: int(v) for (k, v) in tree.find('common').attrib.items()},
        'pages': [{'id': int(page.attrib['id']), 'file': page.attrib['file']}
                  for page in tree.iterfind('pages/page')],
        'chars': [{k: int(v) for (k, v) in char.attrib.items()}
                  for char in tree.iterfind('chars/char')],
        'kernings': [{k: int(v) for (k, v) in kern.attrib.items()}
                     for kern in tree.iterfind('kernings/kerning')],
    }
def read_txt(path):
    """Parse a BMFont text descriptor into a plain dict.

    Produces the same structure as :func:`read_xml`: keys ``info``,
    ``common``, ``pages``, ``chars`` and ``kernings``.
    """
    font_info = {
        'pages': [],
        'chars': [],
        'kernings': [],
    }
    with open(path) as f:
        for line in f:
            # Each line is "<tag> key=value key=value ...".
            # https://github.com/robhagemans/monobit/blob/7ac7af38f278ccc26eece442027d8aaaa40985a2/monobit/bmfont.py#L270
            tag, rest = line.strip().split(' ', 1)
            # shlex honors the double quotes around string values.
            fields = dict(item.split('=') for item in shlex.split(rest) if item)
            if tag == 'info':
                parsed = {k: int(v) for (k, v) in fields.items()
                          if k not in ['face', 'charset', 'padding', 'spacing']}
                parsed['face'] = fields['face']
                parsed['charset'] = fields['charset']
                parsed['padding'] = [int(n) for n in fields['padding'].split(',')]
                parsed['spacing'] = [int(n) for n in fields['spacing'].split(',')]
                font_info['info'] = parsed
            if tag == 'common':
                font_info['common'] = {k: int(v) for (k, v) in fields.items()}
            elif tag == 'page':
                font_info['pages'].append({
                    'id': int(fields['id']),
                    'file': fields['file']
                })
            elif tag == 'char':
                font_info['chars'].append({k: int(v) for (k, v) in fields.items()})
            elif tag == 'kerning':
                font_info['kernings'].append({k: int(v) for (k, v) in fields.items()})
    return font_info
def read_json(path):
    """Load a BMFont JSON descriptor, normalizing page names to {'file', 'id'} dicts."""
    with open(path) as f:
        data = json.load(f)
    # The JSON format stores pages as bare file names; match the other readers.
    data['pages'] = [{'file': name, 'id': index}
                     for index, name in enumerate(data['pages'])]
    return data
def get_charset_name(char_set):
    """Map a BMFont numeric charset ID to its symbolic name (KeyError if unknown)."""
    names = {
        0: 'ANSI', 1: 'DEFAULT', 2: 'SYMBOL', 77: 'MAC',
        128: 'SHIFTJIS', 129: 'HANGUL', 130: 'JOHAB', 134: 'GB2312',
        136: 'CHINESEBIG5', 161: 'GREEK', 162: 'TURKISH', 163: 'VIETNAMESE',
        177: 'HEBREW', 178: 'ARABIC', 186: 'BALTIC', 204: 'RUSSIAN',
        222: 'THAI', 238: 'EASTEUROPE', 255: 'OEM',
    }
    return names[char_set]
def read_bin_string(b):
    """Read a NUL-terminated string from *b*; return (remaining bytes, string)."""
    end = 0
    while b[end] != 0:
        end += 1
    text = ''.join(chr(code) for code in b[:end])
    # Skip past the terminating NUL as well.
    return b[end + 1:], text
def read_bin(path):
    """Parse a binary BMFont (``BMF`` version 3) file into a plain dict.

    Produces the same structure as read_txt/read_xml: keys ``info``,
    ``common``, ``pages``, ``chars`` and — only when the optional kerning
    block is present — ``kernings``.

    Fix: removed two tautological asserts (``assert not b''`` and
    ``assert b'A'``) that always pass regardless of the data and so checked
    nothing.
    """
    result = {}
    with open(path, mode='rb') as f:
        b = f.read()
    # File signature: 'BMF' followed by the one-byte format version (3).
    assert b[:4] == b'BMF\x03'
    b = b[4:]

    # Block 1: info.  Every block starts with a 1-byte type and 4-byte size.
    block_type, block_size = struct.unpack('<Bi', b[:5])
    assert block_type == 1
    b = b[5:]
    names = ('size', 'bit_field', 'charset', 'stretchH', 'aa', 'paddingUp',
             'paddingRight', 'paddingDown', 'paddingLeft', 'spacingHor', 'spacingVer',
             'outline')
    data = struct.unpack('<hBBHBBBBBBBB', b[:14])
    b = b[14:]
    d = dict(zip(names, data))
    # Collapse the four padding fields into one list to match the text format.
    d['padding'] = [d['paddingUp'], d['paddingRight'], d['paddingDown'], d['paddingLeft']]
    del d['paddingUp']
    del d['paddingRight']
    del d['paddingDown']
    del d['paddingLeft']
    d['spacing'] = [d['spacingHor'], d['spacingVer']]
    del d['spacingHor']
    del d['spacingVer']
    # Expand the packed boolean bit field into individual 0/1 flags.
    d['smooth'] = 1 if d['bit_field'] & (1 << 0) else 0
    d['unicode'] = 1 if d['bit_field'] & (1 << 1) else 0
    d['italic'] = 1 if d['bit_field'] & (1 << 2) else 0
    d['bold'] = 1 if d['bit_field'] & (1 << 3) else 0
    del d['bit_field']
    b, d['face'] = read_bin_string(b)
    # For unicode fonts the charset byte is meaningless; report it as ''.
    d['charset'] = '' if d['unicode'] else get_charset_name(d['charset'])
    result['info'] = d

    # Block 2: common.
    block_type, block_size = struct.unpack('<Bi', b[:5])
    assert block_type == 2
    b = b[5:]
    names = ('lineHeight', 'base', 'scaleW', 'scaleH', 'pages', 'bit_field',
             'alphaChnl', 'redChnl', 'greenChnl', 'blueChnl')
    data = struct.unpack('<HHHHHBBBBB', b[:15])
    b = b[15:]
    d = dict(zip(names, data))
    d['packed'] = 1 if d['bit_field'] & (1 << 7) else 0
    del d['bit_field']
    result['common'] = d

    # Block 3: page file names (a sequence of NUL-terminated strings).
    block_type, block_size = struct.unpack('<Bi', b[:5])
    b = b[5:]
    assert block_type == 3
    pages = []
    i = 0
    while True:
        b, file_name = read_bin_string(b)
        # TODO: check if there is \0 when there are no pages
        if not file_name:
            break
        pages.append({
            'id': i,
            'file': file_name
        })
        block_size -= len(file_name)
        if block_size < len(file_name):
            break
        i += 1
    result['pages'] = pages

    # Block 4: chars, 20 bytes per glyph.
    block_type, block_size = struct.unpack('<Bi', b[:5])
    b = b[5:]
    assert block_type == 4
    assert block_size % 20 == 0
    char_count = block_size // 20
    names = ('id', 'x', 'y', 'width', 'height', 'xoffset',
             'yoffset', 'xadvance', 'page', 'chnl')
    chars = []
    for i in range(char_count):
        data = struct.unpack('<IHHHHhhhBB', b[:20])
        b = b[20:]
        chars.append(dict(zip(names, data)))
    result['chars'] = chars

    # Block 5 (optional): kerning pairs, 10 bytes each.
    if b:
        block_type, block_size = struct.unpack('<Bi', b[:5])
        assert block_type == 5
        b = b[5:]
        assert block_size % 10 == 0
        kerning_count = block_size // 10
        names = ('first', 'second', 'amount')
        kernings = []
        for i in range(kerning_count):
            data = struct.unpack('<IIh', b[:10])
            b = b[10:]
            kernings.append(dict(zip(names, data)))
        result['kernings'] = kernings
    # The whole buffer must have been consumed.
    assert not b
    return result
def flatten(path, el):
    """Yield (path, leaf) pairs for every scalar reachable inside *el*.

    *path* is the list of mapping keys / sequence indices leading to the
    leaf.  Strings and bytes are treated as scalars, not sequences.
    """
    if isinstance(el, abc.Mapping):
        for key in el:
            yield from flatten(path + [key], el[key])
    elif isinstance(el, abc.Iterable) and not isinstance(el, (str, bytes)):
        for index, item in enumerate(el):
            yield from flatten(path + [index], item)
    else:
        yield path, el
def flatten_data(path):
    """Debug helper: print every (path, value) pair of the JSON document at *path*."""
    with open(path) as f:
        document = json.load(f)
    for pair in flatten([], document):
        print(pair)
def test_fnt_formats(font_exe, env):
    """All four --data-format outputs must describe the exact same font."""
    clear_work_dir()
    args = [font_exe, '--font-file', 'fonts/FreeSans.ttf', '--output', 'generated/format_test',
            '--chars', '32-126', '--font-size', '16',
            '--include-kerning-pairs',
            '--padding-up', '1', '--padding-right', '2', '--padding-down', '3', '--padding-left', '4',
            '--spacing-horiz', '5', '--spacing-vert', '6',
            '--texture-width', '128', '--texture-height', '128']
    readers = [('txt', read_txt), ('xml', read_xml), ('json', read_json), ('bin', read_bin)]
    dumps = {}
    for fmt, reader in readers:
        subprocess.run(args + ['--data-format', fmt], check=True, env=env)
        # Normalize each parsed descriptor to sorted JSON for comparison.
        serialized = json.dumps(reader('generated/format_test.fnt'),
                                indent=4, sort_keys=True)
        with open('generated/test_{}.json'.format(fmt), 'w') as json_file:
            json_file.write(serialized)
        dumps[fmt] = serialized
    assert dumps['txt'] == dumps['json']
    assert dumps['txt'] == dumps['xml']
    assert dumps['txt'] == dumps['bin']
def main(argv):
    """Entry point: ``argv`` is [prog, path-to-fontbm-binary, runtime-lib-dir]."""
    assert len(argv) == 3
    font_exe, runtime_lib_dir = argv[1], argv[2]
    assert os.path.isfile(font_exe)
    assert os.path.isdir(runtime_lib_dir)
    # Prepend the runtime library dir so the binary can find its shared libs.
    env = os.environ.copy()
    env['PATH'] = os.pathsep.join((runtime_lib_dir, env.get('PATH', '')))
    test_expected(font_exe, env)
    test_too_many_textures(font_exe, env)
    test_fnt_formats(font_exe, env)


if __name__ == '__main__':
    main(sys.argv)
| 35.815126 | 122 | 0.536133 |
ace01aa61357f4a9bbb1a586504336934437f508 | 1,325 | py | Python | tests/unit/runner/test_utils.py | ethan-asapp/flambe | 70257167058c7b82ee39f74167a6161bd264ad18 | [
"MIT"
] | 148 | 2019-08-29T21:19:03.000Z | 2022-03-18T06:13:53.000Z | tests/unit/runner/test_utils.py | ethan-asapp/flambe | 70257167058c7b82ee39f74167a6161bd264ad18 | [
"MIT"
] | 108 | 2019-09-03T14:36:10.000Z | 2020-05-13T15:53:14.000Z | tests/unit/runner/test_utils.py | ethan-asapp/flambe | 70257167058c7b82ee39f74167a6161bd264ad18 | [
"MIT"
] | 21 | 2019-09-08T14:09:45.000Z | 2020-12-27T04:12:33.000Z | import pytest
import tempfile
import os
from flambe.runner import utils
# Bytes per megabyte.
MB = 2 ** 20


def create_file(filename, size_MB=1):
    """Create *filename* as a zero-filled file of exactly ``size_MB`` megabytes."""
    # From https://stackoverflow.com/a/8816154 — truncate() extends the file.
    with open(filename, "wb") as handle:
        handle.truncate(size_MB * MB)
@pytest.mark.parametrize("mbs", [1, 2, 3, 4])
def test_size_MB_file(mbs):
    """get_size_MB must report a single file's size in megabytes."""
    with tempfile.NamedTemporaryFile("wb") as tmp:
        create_file(tmp.name, size_MB=mbs)
        assert utils.get_size_MB(tmp.name) == mbs
@pytest.mark.parametrize("mbs", [1, 2, 3, 4])
def test_size_MB_folder(mbs):
    """get_size_MB must sum the sizes of every file inside a directory."""
    with tempfile.TemporaryDirectory() as folder:
        for name in ('1.bin', '2.bin', '3.bin', '4.bin'):
            create_file(os.path.join(folder, name), size_MB=mbs)
        assert utils.get_size_MB(folder) == 4 * mbs
def test_get_files():
    """get_files must walk nested directories and yield every file path."""
    with tempfile.TemporaryDirectory() as root:
        top_file = os.path.join(root, 'some_file.txt')
        os.mkdir(os.path.join(root, 'folder'))
        nested_file = os.path.join(root, 'folder', 'some_file.txt')
        for path in (top_file, nested_file):
            open(path, 'w+').close()
        assert list(utils.get_files(root)) == [top_file, nested_file]
def test_get_files_invalid():
    """A nonexistent path must make get_files raise ValueError."""
    missing = '/some/non/existent/path/to/test'
    with pytest.raises(ValueError):
        utils.get_files(missing)
| 27.604167 | 58 | 0.636981 |
ace01b6f11a6b6e8919bea15891d3ebfee2bef91 | 14,953 | py | Python | pyvista/utilities/fileio.py | russelldj/pyvista | f489eedb2da2841cfd8ba99cf87528bfafa3a888 | [
"MIT"
] | 1 | 2021-06-03T15:56:35.000Z | 2021-06-03T15:56:35.000Z | pyvista/utilities/fileio.py | russelldj/pyvista | f489eedb2da2841cfd8ba99cf87528bfafa3a888 | [
"MIT"
] | 1 | 2022-03-30T14:03:23.000Z | 2022-03-30T14:03:23.000Z | pyvista/utilities/fileio.py | russelldj/pyvista | f489eedb2da2841cfd8ba99cf87528bfafa3a888 | [
"MIT"
] | null | null | null | """Contains a dictionary that maps file extensions to VTK readers."""
import pathlib
import os
import numpy as np
import vtk
import pyvista
# True when the installed VTK major version is 9 or newer; VTK 9 changed the
# UnstructuredGrid cell-array layout (no separate offset array).
VTK9 = vtk.vtkVersion().GetVTKMajorVersion() >= 9

# Maps a lower-cased file extension (including the dot) to the VTK reader
# class able to load it.  Entries hold the class itself; ``get_reader``
# instantiates it on demand.
READERS = {
    # Standard dataset readers:
    '.vtk': vtk.vtkDataSetReader,
    '.pvtk': vtk.vtkPDataSetReader,
    '.vti': vtk.vtkXMLImageDataReader,
    '.pvti': vtk.vtkXMLPImageDataReader,
    '.vtr': vtk.vtkXMLRectilinearGridReader,
    '.pvtr': vtk.vtkXMLPRectilinearGridReader,
    '.vtu': vtk.vtkXMLUnstructuredGridReader,
    '.pvtu': vtk.vtkXMLPUnstructuredGridReader,
    '.ply': vtk.vtkPLYReader,
    '.obj': vtk.vtkOBJReader,
    '.stl': vtk.vtkSTLReader,
    '.vtp': vtk.vtkXMLPolyDataReader,
    '.vts': vtk.vtkXMLStructuredGridReader,
    '.vtm': vtk.vtkXMLMultiBlockDataReader,
    '.vtmb': vtk.vtkXMLMultiBlockDataReader,
    # Image formats:
    '.bmp': vtk.vtkBMPReader,
    '.dem': vtk.vtkDEMReader,
    '.dcm': vtk.vtkDICOMImageReader,
    '.img': vtk.vtkDICOMImageReader,
    '.jpeg': vtk.vtkJPEGReader,
    '.jpg': vtk.vtkJPEGReader,
    '.mhd': vtk.vtkMetaImageReader,
    '.nrrd': vtk.vtkNrrdReader,
    '.nhdr': vtk.vtkNrrdReader,
    '.png': vtk.vtkPNGReader,
    '.pnm': vtk.vtkPNMReader, # TODO: not tested
    '.slc': vtk.vtkSLCReader,
    '.tiff': vtk.vtkTIFFReader,
    '.tif': vtk.vtkTIFFReader,
    # Other formats:
    '.byu': vtk.vtkBYUReader, # TODO: not tested with this extension
    '.g': vtk.vtkBYUReader,
    # '.chemml': vtk.vtkCMLMoleculeReader, # TODO: not tested
    # '.cml': vtk.vtkCMLMoleculeReader, # vtkMolecule is not supported by pyvista
    # TODO: '.csv': vtk.vtkCSVReader, # vtkTables are currently not supported
    '.facet': vtk.vtkFacetReader,
    '.cas': vtk.vtkFLUENTReader, # TODO: not tested
    # '.dat': vtk.vtkFLUENTReader, # TODO: not working
    # '.cube': vtk.vtkGaussianCubeReader, # Contains `atom_types` which are note supported?
    '.res': vtk.vtkMFIXReader, # TODO: not tested
    '.foam': vtk.vtkOpenFOAMReader,
    # '.pdb': vtk.vtkPDBReader, # Contains `atom_types` which are note supported?
    '.p3d': vtk.vtkPlot3DMetaReader,
    '.pts': vtk.vtkPTSReader,
    # '.particles': vtk.vtkParticleReader, # TODO: not tested
    #TODO: '.pht': vtk.vtkPhasta??????,
    #TODO: '.vpc': vtk.vtkVPIC?????,
    # '.bin': vtk.vtkMultiBlockPLOT3DReader,# TODO: non-default routine
    '.tri': vtk.vtkMCubesReader,
    '.inp': vtk.vtkAVSucdReader,
}
VTK_MAJOR = vtk.vtkVersion().GetVTKMajorVersion()
VTK_MINOR = vtk.vtkVersion().GetVTKMinorVersion()

# SegY readers appeared in VTK 8.2.  Compare the (major, minor) pair as a
# tuple: the previous ``VTK_MAJOR >= 8 and VTK_MINOR >= 2`` test wrongly
# excluded versions such as 9.0 and 9.1, whose minor number is below 2.
if (VTK_MAJOR, VTK_MINOR) >= (8, 2):
    try:
        READERS['.sgy'] = vtk.vtkSegYReader
        READERS['.segy'] = vtk.vtkSegYReader
    except AttributeError:
        pass
def get_ext(filename):
    """Return the lower-cased extension (including the dot) of *filename*."""
    _, extension = os.path.splitext(filename)
    return extension.lower()
def get_reader(filename):
    """Instantiate the VTK reader registered for this file's extension.

    Raises KeyError when no reader is registered for the extension.
    """
    extension = get_ext(filename)
    reader_class = READERS[extension]
    return reader_class()
def set_vtkwriter_mode(vtk_writer, use_binary=True):
    """Switch any VTK writer between binary and ASCII output; return the writer.

    Legacy (``vtkDataWriter``) and XML (``vtkXMLWriter``) writers expose
    different method names for the same toggle; any other writer type is
    returned unchanged.
    """
    if isinstance(vtk_writer, vtk.vtkDataWriter):
        set_mode = (vtk_writer.SetFileTypeToBinary if use_binary
                    else vtk_writer.SetFileTypeToASCII)
        set_mode()
    elif isinstance(vtk_writer, vtk.vtkXMLWriter):
        set_mode = (vtk_writer.SetDataModeToBinary if use_binary
                    else vtk_writer.SetDataModeToAscii)
        set_mode()
    return vtk_writer
def standard_reader_routine(reader, filename, attrs=None):
    """Run *reader* on *filename* through the common VTK read pipeline.

    The reader must come from the ``READERS`` mapping.

    Parameters
    ----------
    reader : vtkReader
        Any instantiated VTK reader class.
    filename : str
        The string filename to the data file to read.
    attrs : dict, optional
        Mapping of reader method names to their arguments; each method is
        called on the reader before the read.  A value of ``None`` means
        "call with no arguments".
    """
    if attrs is None:
        attrs = {}
    if not isinstance(attrs, dict):
        raise TypeError('Attributes must be a dictionary of name and arguments.')
    reader.SetFileName(filename)
    # Apply any configured reader attributes before reading.
    for method_name, arguments in attrs.items():
        method = getattr(reader, method_name)
        if arguments is None:
            method()
        else:
            if not isinstance(arguments, (list, tuple)):
                arguments = [arguments]
            method(*arguments)
    # Perform the read and wrap the VTK output for PyVista.
    reader.Update()
    return pyvista.wrap(reader.GetOutputDataObject(0))
def read_legacy(filename):
    """Read a legacy ``.vtk`` file, force-reading every declared array."""
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)
    # Poorly formatted legacy files may omit array declarations, so switch
    # every read-all toggle on before updating.
    for switch_on in (reader.ReadAllScalarsOn,
                      reader.ReadAllColorScalarsOn,
                      reader.ReadAllNormalsOn,
                      reader.ReadAllTCoordsOn,
                      reader.ReadAllVectorsOn):
        switch_on()
    reader.Update()
    output = reader.GetOutputDataObject(0)
    if output is None:
        raise RuntimeError('No output when using VTKs legacy reader')
    return pyvista.wrap(output)
def read(filename, attrs=None, file_format=None):
    """Read any VTK file.

    It will figure out what reader to use then wrap the VTK object for
    use in PyVista.

    Fixes: the FileNotFoundError message had lost its ``{filename}``
    placeholder, and the extension dispatch used substring membership
    (``ext in '.vti'``) which spuriously matched an empty extension; exact
    comparisons are used instead.

    Parameters
    ----------
    filename : str
        The string path to the file to read. If a list of files is
        given, a :class:`pyvista.MultiBlock` dataset is returned with
        each file being a separate block in the dataset.
    attrs : dict, optional
        A dictionary of attributes to call on the reader. Keys of
        dictionary are the attribute/method names and values are the
        arguments passed to those calls. If you do not have any
        attributes to call, pass ``None`` as the value.
    file_format : str, optional
        Format of file to read with meshio.

    Examples
    --------
    Load an example mesh

    >>> import pyvista
    >>> from pyvista import examples
    >>> mesh = pyvista.read(examples.antfile)

    Load a vtk file

    >>> mesh = pyvista.read('my_mesh.vtk') # doctest:+SKIP

    Load a meshio file

    >>> mesh = pyvista.read("mesh.obj") # doctest:+SKIP

    """
    if isinstance(filename, (list, tuple)):
        # Read each file into its own named block of a MultiBlock dataset.
        multi = pyvista.MultiBlock()
        for each in filename:
            if isinstance(each, (str, pathlib.Path)):
                name = os.path.basename(str(each))
            else:
                name = None
            multi[-1, name] = read(each)
        return multi
    filename = os.path.abspath(os.path.expanduser(str(filename)))
    if not os.path.isfile(filename):
        raise FileNotFoundError(f'File ({filename}) not found')
    ext = get_ext(filename)

    # Read file using meshio.read if file_format is present
    if file_format:
        return read_meshio(filename, file_format)

    # From the extension, decide which reader to use
    if attrs is not None:
        reader = get_reader(filename)
        return standard_reader_routine(reader, filename, attrs=attrs)
    elif ext == '.vti':  # ImageData
        return pyvista.UniformGrid(filename)
    elif ext == '.vtr':  # RectilinearGrid
        return pyvista.RectilinearGrid(filename)
    elif ext == '.vtu':  # UnstructuredGrid
        return pyvista.UnstructuredGrid(filename)
    elif ext in ['.ply', '.obj', '.stl']:  # PolyData
        return pyvista.PolyData(filename)
    elif ext == '.vts':  # StructuredGrid
        return pyvista.StructuredGrid(filename)
    elif ext in ['.vtm', '.vtmb']:
        return pyvista.MultiBlock(filename)
    elif ext in ['.e', '.exo']:
        return read_exodus(filename)
    elif ext == '.vtk':
        # Attempt to use the legacy reader...
        return read_legacy(filename)
    elif ext in ['.jpeg', '.jpg']:
        return read_texture(filename).to_image()
    else:
        # Attempt to find a reader in the READERS mapping
        try:
            reader = get_reader(filename)
            return standard_reader_routine(reader, filename)
        except KeyError:
            # Attempt read with meshio
            try:
                from meshio._exceptions import ReadError
                try:
                    return read_meshio(filename)
                except ReadError:
                    pass
            except SyntaxError:
                # https://github.com/pyvista/pyvista/pull/495
                pass
    raise IOError("This file was not able to be automatically read by pyvista.")
def read_texture(filename, attrs=None):
    """Load a ``vtkTexture`` from an image file, falling back to imageio."""
    path = os.path.abspath(os.path.expanduser(filename))
    try:
        # First try a VTK image reader selected by the file extension.
        vtk_reader = get_reader(path)
        image = standard_reader_routine(vtk_reader, path, attrs=attrs)
        if image.n_points >= 2:
            return pyvista.image_to_texture(image)
        raise RuntimeError("Problem reading the image with VTK.")
    except (KeyError, RuntimeError):
        # No VTK reader for this extension, or the read produced nothing:
        # fall through to the imageio reader below.
        pass
    import imageio
    return pyvista.numpy_to_texture(imageio.imread(path))
def read_exodus(filename,
                animate_mode_shapes=True,
                apply_displacements=True,
                displacement_magnitude=1.0,
                enabled_sidesets=None):
    """Read an ExodusII file (``'.e'`` or ``'.exo'``).

    ``enabled_sidesets`` may contain sideset indices (int) or sideset array
    names (str); by default every sideset in the file is enabled.
    """
    reader = vtk.vtkExodusIIReader()
    reader.SetFileName(filename)
    reader.UpdateInformation()
    reader.SetAnimateModeShapes(animate_mode_shapes)
    reader.SetApplyDisplacements(apply_displacements)
    reader.SetDisplacementMagnitude(displacement_magnitude)

    sidesets = enabled_sidesets
    if sidesets is None:
        sidesets = range(reader.GetNumberOfSideSetArrays())
    for sideset in sidesets:
        if isinstance(sideset, str):
            name = sideset
        elif isinstance(sideset, int):
            name = reader.GetSideSetArrayName(sideset)
        else:
            raise ValueError(f'Could not parse sideset ID/name: {sideset}')
        reader.SetSideSetArrayStatus(name, 1)

    reader.Update()
    return pyvista.wrap(reader.GetOutput())
def from_meshio(mesh):
    """Convert a ``meshio`` mesh instance to a PyVista mesh."""
    from meshio.vtk._vtk import (
        meshio_to_vtk_type,
        vtk_type_to_numnodes,
    )

    # Extract cells from meshio.Mesh object
    offset = []
    cells = []
    cell_type = []
    next_offset = 0
    for c in mesh.cells:
        vtk_type = meshio_to_vtk_type[c.type]
        numnodes = vtk_type_to_numnodes[vtk_type]
        # VTK flat cell arrays store each cell as [n, i0, i1, ..., i(n-1)].
        cells.append(
            np.hstack((np.full((len(c.data), 1), numnodes), c.data)).ravel()
        )
        cell_type += [vtk_type] * len(c.data)
        if not VTK9:
            # Pre-VTK9 grids additionally need each cell's start index within
            # the flat cell array; every cell occupies numnodes + 1 entries.
            offset += [next_offset + i * (numnodes + 1) for i in range(len(c.data))]
            next_offset = offset[-1] + numnodes + 1
    # Extract cell data from meshio.Mesh object
    cell_data = {k: np.concatenate(v) for k, v in mesh.cell_data.items()}
    # Create pyvista.UnstructuredGrid object
    points = mesh.points
    if points.shape[1] == 2:
        # Pad 2D points with a zero z-coordinate; VTK points are always 3D.
        points = np.hstack((points, np.zeros((len(points), 1))))
    if VTK9:
        grid = pyvista.UnstructuredGrid(
            np.concatenate(cells),
            np.array(cell_type),
            np.array(points, np.float64),
        )
    else:
        grid = pyvista.UnstructuredGrid(
            np.array(offset),
            np.concatenate(cells),
            np.array(cell_type),
            np.array(points, np.float64),
        )
    # Set point data
    grid.point_arrays.update({k: np.array(v, np.float64) for k, v in mesh.point_data.items()})
    # Set cell data
    grid.cell_arrays.update(cell_data)
    return grid
def read_meshio(filename, file_format=None):
    """Read any meshio-supported mesh file and convert it to a PyVista grid."""
    import meshio
    # Expand '~' and relative components so meshio receives an absolute path.
    absolute_path = os.path.abspath(os.path.expanduser(str(filename)))
    return from_meshio(meshio.read(absolute_path, file_format))
def save_meshio(filename, mesh, file_format = None, **kwargs):
    """Save mesh to file using meshio.

    Parameters
    ----------
    mesh : pyvista.Common
        Any PyVista mesh/spatial data type.
    file_format : str
        File type for meshio to save.

    """
    import meshio
    from meshio.vtk._vtk import vtk_to_meshio_type

    # Make sure relative paths will work
    filename = os.path.abspath(os.path.expanduser(str(filename)))

    # Cast to pyvista.UnstructuredGrid
    if not isinstance(mesh, pyvista.UnstructuredGrid):
        mesh = mesh.cast_to_unstructured_grid()

    # Copy useful arrays to avoid repeated calls to properties
    vtk_offset = mesh.offset
    vtk_cells = mesh.cells
    vtk_cell_type = mesh.celltypes

    # Check that meshio supports all cell types in input mesh
    pixel_voxel = {8, 11} # Handle pixels and voxels
    for cell_type in np.unique(vtk_cell_type):
        if cell_type not in vtk_to_meshio_type.keys() and cell_type not in pixel_voxel:
            raise TypeError(f"meshio does not support VTK type {cell_type}.")

    # Get cells
    cells = []
    c = 0
    for offset, cell_type in zip(vtk_offset, vtk_cell_type):
        numnodes = vtk_cells[offset+c]
        if VTK9: # must offset by cell count
            cell = vtk_cells[offset+1+c:offset+1+c+numnodes]
            c += 1
        else:
            cell = vtk_cells[offset+1:offset+1+numnodes]
        # Pixels/voxels use a different node ordering than quads/hexahedra;
        # reorder the connectivity so meshio receives the quad/hexa layout.
        cell = (
            cell if cell_type not in pixel_voxel
            else cell[[0, 1, 3, 2]] if cell_type == 8
            else cell[[0, 1, 3, 2, 4, 5, 7, 6]]
        )
        # Pixel (8) becomes quad (9); voxel (11) becomes hexahedron (12).
        cell_type = cell_type if cell_type not in pixel_voxel else cell_type+1
        cell_type = (
            vtk_to_meshio_type[cell_type] if cell_type != 7
            else f"polygon{numnodes}"
        )
        # Consecutive cells of the same meshio type are merged into one block.
        if len(cells) > 0 and cells[-1][0] == cell_type:
            cells[-1][1].append(cell)
        else:
            cells.append((cell_type, [cell]))
    for k, c in enumerate(cells):
        cells[k] = (c[0], np.array(c[1]))

    # Get point data
    point_data = {k.replace(" ", "_"): v for k, v in mesh.point_arrays.items()}

    # Get cell data
    vtk_cell_data = mesh.cell_arrays
    # Split the flat per-cell arrays at each block boundary.
    n_cells = np.cumsum([len(c[1]) for c in cells[:-1]])
    cell_data = (
        {k.replace(" ", "_"): np.split(v, n_cells) for k, v in vtk_cell_data.items()}
        if vtk_cell_data
        else {}
    )

    # Save using meshio
    meshio.write_points_cells(
        filename=filename,
        points=np.array(mesh.points),
        cells=cells,
        point_data=point_data,
        cell_data=cell_data,
        file_format=file_format,
        **kwargs
    )
| 32.791667 | 94 | 0.634588 |
ace01bc0c7663dee426c826749a676c5f9000e03 | 298 | py | Python | wafw00f/plugins/maxcdn.py | biscuitehh/wafw00f | b1a08122ea3d65e2aaaa5120231cca6c37851c5b | [
"BSD-3-Clause"
] | 1 | 2020-01-17T08:09:48.000Z | 2020-01-17T08:09:48.000Z | wafw00f/plugins/maxcdn.py | tlsloves/wafw00f | 9682cdbdffc78150719b58390f8c5552b40a40b6 | [
"BSD-3-Clause"
] | null | null | null | wafw00f/plugins/maxcdn.py | tlsloves/wafw00f | 9682cdbdffc78150719b58390f8c5552b40a40b6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Copyright (C) 2019, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'MaxCDN (MaxCDN)'


def is_waf(self):
    # MaxCDN fingerprint: the response carries an ``X-CDN: maxcdn`` header.
    if self.matchHeader(('X-CDN', r'maxcdn')):
        return True
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.