Dataset schema (one record per source file):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (0 to 57 items)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
---
blob_id: fd191f1c5da210ada72d3d2402ec8aabc3981132
directory_id: a63d907ad63ba6705420a6fb2788196d1bd3763c
path: /src/datamgr/datamanager/collection/cmdb/base.py
content_id: 91d97111556602a33806e86c1221d0ff056a629e
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Tencent/bk-base
snapshot_id: a38461072811667dc2880a13a5232004fe771a4b
revision_id: 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
branch_name: refs/heads/master
visit_date: 2022-07-30T04:24:53.370661
revision_date: 2022-04-02T10:30:55
committer_date: 2022-04-02T10:30:55
github_id: 381,257,882
star_events_count: 101
fork_events_count: 51
gha_license_id: NOASSERTION
gha_event_created_at: 2022-04-02T10:30:56
gha_created_at: 2021-06-29T06:10:01
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,137
extension: py
filename: base.py
content:
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
import time
from functools import lru_cache
from typing import List, Optional
import attr
from cached_property import cached_property_with_ttl
from api import cmdb_api
from collection.common.collect import BKDRawDataCollector
from collection.common.exceptions import CMDBApiResultError
from collection.conf.constants import CMDB_FIELDS_TTL, cmdb_cursor_key_template
from common.redis import connections as redis_connections
logger = logging.getLogger(__name__)
WATCH_CURSOR_TIMEOUT = 3600 * 3
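# Watch cursors are kept in redis for three hours; see set_cursor_id below.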
@lru_cache(maxsize=None)
def get_bk_biz_ids():
return cmdb_api.get_all_business_ids()
@attr.s
class Signal:
is_active = attr.ib(type=bool)
class CMDBBaseCollector(BKDRawDataCollector):
object_type = ""
fields = []
key_name = None
@property
def bk_biz_ids(self):
return get_bk_biz_ids()
def filter_columns(self, object_info_dict):
"""
        During batch collection, user-defined fields outside the configured field list may be returned; group them under the "customize" key.
"""
customize_info = {
host_key: object_info_dict[host_key]
for host_key in object_info_dict
if host_key not in self.fields
}
for customize_key in customize_info.keys():
object_info_dict.pop(customize_key)
object_info_dict["customize"] = json.dumps(customize_info)
return object_info_dict
def report(self, batch_report_default=False):
"""
        Run a full (batch) collection report, then hand over to incremental (watch) reporting.
"""
start_time = int(time.time())
        # Only do a full report when there is no cursor, the cursor has expired, or a full report is forced
if not self.exist_watcher() or batch_report_default:
self.batch_report()
self.watch_report(start_time)
def batch_report(self):
pass
def watch_report(self, start_time):
"""
        Generic incremental collection/reporting for a single object type.
"""
watcher = CMDBResourceWatcher(
object_type=self.object_type, fields=self.fields, key_name=self.key_name
)
watcher.watch_loop(start_time, self.handle_event)
def handle_event(self, event):
"""
        Event handler; implementation is the subclass's responsibility.
        :param event: a CMDB sync event
"""
pass
def exist_watcher(self) -> bool:
"""
        Whether an active cursor exists.
"""
watcher = CMDBResourceWatcher(self.object_type, self.fields)
return bool(watcher.get_cursor_id())
class CMDBResourceWatcher:
def __init__(self, object_type: str, fields: Optional[List] = None, key_name=None):
self.object_type = object_type
self.fields = fields
self.redis_connection = redis_connections["default"]
self.key_name = key_name
@cached_property_with_ttl(ttl=CMDB_FIELDS_TTL)
def cmdb_obj_fields(self):
        # Fall back to the default fields when the dynamic lookup fails
try:
fields_list = cmdb_api.get_object_fields_list(
bk_obj_id=self.object_type, raise_exception=True
)
if self.key_name and self.key_name not in fields_list:
fields_list.append(self.key_name)
fields_list.extend(["create_time", "last_time"])
return fields_list
except Exception as e:
logger.warning(
f"Refresh {self.object_type} fields list from cmdb failed for error {e}"
)
return self.fields
def watch_loop(self, start_time: int, handle_event, signal=None, sync_fields=True):
"""
        Incremental collection via the CMDB watch API.
        :param start_time: Unix timestamp (seconds) to start watching from
        :param handle_event: event-handling callback
        :param signal: external signal object, checked periodically for an interrupt flag
        :param sync_fields: if True, periodically refresh the field list from CMDB instead of using the static fields
"""
if signal is None:
signal = Signal(is_active=True)
base_watch_params = {
"bk_event_types": ["create", "update", "delete"],
"bk_resource": self.object_type,
}
cursor_id = self.get_cursor_id()
fail_watch_count = 0
while signal.is_active:
base_watch_params["bk_fields"] = (
self.cmdb_obj_fields if sync_fields else self.fields
)
if not cursor_id:
watch_params = {**base_watch_params, **{"bk_start_from": start_time}}
else:
watch_params = {**base_watch_params, **{"bk_cursor": cursor_id}}
response = cmdb_api.resource_watch(watch_params)
if response.code == 0 and response.data and response.data["bk_events"]:
events = response.data["bk_events"]
                # When bk_watched is true, events were observed and bk_events holds their details
if response.data["bk_watched"]:
success_count = 0
for event in events:
try:
handle_event(event)
except Exception as err:
logger.exception(
f"[Watcher:{self.object_type}] Fail to handler event({event}), {err}"
)
else:
success_count += 1
logger.info(
f"[Watcher:{self.object_type}] Now process {success_count} events watched."
)
cursor_id = events[-1]["bk_cursor"]
self.set_cursor_id(cursor_id)
else:
                logger.error(
                    f"[Watcher:{self.object_type}] Failed to watch events, {response.response}"
)
fail_watch_count += 1
if fail_watch_count > 120:
raise CMDBApiResultError(message=response.response)
time.sleep(30)
def get_cursor_id(self):
"""
        Get the cursor of the given watch task; the key is bkpub_collect_cmdb_cursor_{object_type} and the value is the cursor_id
"""
cursor_id = self.redis_connection.get(
cmdb_cursor_key_template.format(object_type=self.object_type)
)
return cursor_id
def set_cursor_id(self, cursor_id):
"""
        Store the cursor of the given watch task; the key is bkpub_collect_cmdb_cursor_{object_type} and the value is the cursor_id.
        If the cursor expires (currently configured as three hours), the key is released.
"""
self.redis_connection.setex(
name=cmdb_cursor_key_template.format(object_type=self.object_type),
value=cursor_id,
time=WATCH_CURSOR_TIMEOUT,
)
---
blob_id: 75f8e9237ffbb5bbe27e4ea79d10948782caafb6
directory_id: c7a993dcd7efa07038a77ba7057b04b9450be550
path: /marching_cube/val.py
content_id: e2feeda6da81a082b83e4a33f3c149945ca788d7
detected_licenses: ["MIT"]
license_type: permissive
repo_name: yiyiliao/deep_marching_cubes
snapshot_id: 6d5f889a45367239c719892c426bfd8a7865a4db
revision_id: e6efeb4da828403442b930709666f51a2476636c
branch_name: refs/heads/master
visit_date: 2023-03-10T19:21:12.207458
revision_date: 2023-02-28T01:50:51
committer_date: 2023-02-28T01:50:51
github_id: 137,336,879
star_events_count: 256
fork_events_count: 36
gha_license_id: null
gha_event_created_at: 2020-09-17T19:00:11
gha_created_at: 2018-06-14T09:29:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,077
extension: py
filename: val.py
content:
import numpy as np
import torch
from torch.autograd import Variable
import sys
import os
sys.path.append(os.path.join(os.getcwd(), 'model/cffi'))
import matplotlib as mpl
mpl.use('Agg')
from utils.visualize import save_occupancy_fig, save_mesh_fig
from utils.config import parse_args
from model.table import get_accept_topology
from model.loss import Loss
from data.data_loader import load_data, get_batch
if torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
dtype_long = torch.cuda.LongTensor
else:
dtype = torch.FloatTensor
dtype_long = torch.LongTensor
def run_val(model, loss_obj, data_val, args, phase='train'):
""" Test with the trained model
Input:
model: the Deep Marching Cubes model
loss_obj: the loss instance
data_val: validation data blob, including points and voxel grid
args: configuration arguments
phase: 'train' or 'val'
"""
max_prob = []
# evaluation
loss_eval = 0
for itest in range(args.num_val):
sys.stdout.write('.')
sys.stdout.flush()
itest_ = Variable(dtype_long([itest]))
net_input, pts_rnd, _ = get_batch(data_val, itest_, args)
offset, topology, occupancy = model(net_input)
loss = loss_obj.loss_eval(offset[0], topology[0], pts_rnd[0])
loss_eval += loss.data.cpu()[0]
interval = args.verbose_interval
if phase == 'val':
interval = 1
if np.mod(itest, interval) == 0:
topology_fused = topology[-1].data.cpu().numpy()
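            # Fuse each topology class t in [0, 128) with its reversed counterpart
            # (255 - t), i.e. the same surface with inverted occupancy, by taking
            # the elementwise maximum.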
topology_fused = np.maximum(topology_fused[:, 0:128],
topology_fused[:, 256:127:-1])
topology_fused = topology_fused[:, get_accept_topology()]
save_occupancy_fig(
pts_rnd[-1].data.cpu().numpy(),
occupancy[-1].data.cpu().numpy(),
loss_obj.x_grids,
loss_obj.y_grids,
loss_obj.z_grids,
itest, args, 'val')
topology_vis = topology[:, :, loss_obj.visTopology]
save_mesh_fig(
pts_rnd[-1].data.cpu().numpy(),
offset[-1],
topology_vis[-1],
loss_obj.x_grids,
loss_obj.y_grids,
loss_obj.z_grids,
itest, args, 'val')
print('')
return loss_eval
if __name__ == '__main__':
# parse args
args = parse_args()
# load data
args, data_val = load_data(args, dtype, 'val')
# setup loss object
loss_obj = Loss(args)
# initialize the model
assert(os.path.isfile(args.model))
print "Validating with snapshotted model %s ..." % args.model
deep_marching_cubes = torch.load(args.model)
if torch.cuda.is_available():
deep_marching_cubes.cuda()
# validation
loss = run_val(deep_marching_cubes, loss_obj, data_val, args, 'val')
print('============== average loss:%f' % (loss/args.num_val))
    print('Done!')
---
blob_id: b572663262a4786a27825cdfcbee90d528499456
directory_id: 807438e6974bf68762208ec24cf824dd0e5fabd6
path: /libcloud/test/storage/test_digitalocean_spaces.py
content_id: 4c8146a68c01fb705405152b3ec1f6ca8121a37e
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: apache/libcloud
snapshot_id: 019c5bd839dedd2423f9604936886eaff252e04b
revision_id: abba8c1719a8bda6db8efde2d46fd1b423ae4304
branch_name: refs/heads/trunk
visit_date: 2023-08-31T20:14:22.369970
revision_date: 2023-08-21T20:17:57
committer_date: 2023-08-21T20:17:57
github_id: 419,555
star_events_count: 1,644
fork_events_count: 968
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-13T19:34:44
gha_created_at: 2009-12-11T09:00:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,303
extension: py
filename: test_digitalocean_spaces.py
content:
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.test import LibcloudTestCase
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container
from libcloud.test.secrets import STORAGE_S3_PARAMS
from libcloud.storage.drivers.digitalocean_spaces import (
DO_SPACES_HOSTS_BY_REGION,
DOSpacesConnectionAWS2,
DOSpacesConnectionAWS4,
DigitalOceanSpacesStorageDriver,
)
class DigitalOceanSpacesTests(LibcloudTestCase):
driver_type = DigitalOceanSpacesStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "nyc3.digitaloceanspaces.com"
    @classmethod
    def create_driver(cls):
        return cls.driver_type(*cls.driver_args, signature_version="2", host=cls.default_host)
def setUp(self):
self.driver = self.create_driver()
self.container = Container("test-container", {}, self.driver)
self.object = Object("test-object", 1, "hash", {}, "meta_data", self.container, self.driver)
def test_connection_class_type(self):
res = self.driver.connectionCls is DOSpacesConnectionAWS2
self.assertTrue(res, "driver.connectionCls does not match!")
def test_connection_class_host(self):
host = self.driver.connectionCls.host
self.assertEqual(host, self.default_host)
def test_container_enable_cdn_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.container.enable_cdn()
def test_container_get_cdn_url_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.container.get_cdn_url()
def test_object_enable_cdn_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.object.enable_cdn()
def test_object_get_cdn_url_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.object.get_cdn_url()
def test_invalid_signature_version(self):
with self.assertRaises(ValueError):
self.driver_type(*self.driver_args, signature_version="3", host=self.default_host)
def test_invalid_region(self):
with self.assertRaises(LibcloudError):
self.driver_type(*self.driver_args, region="atlantis", host=self.default_host)
class DigitalOceanSpacesTests_v4(DigitalOceanSpacesTests):
driver_type = DigitalOceanSpacesStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "nyc3.digitaloceanspaces.com"
    @classmethod
    def create_driver(cls):
        return cls.driver_type(*cls.driver_args, signature_version="4")
def test_connection_class_type(self):
res = self.driver.connectionCls is DOSpacesConnectionAWS4
self.assertTrue(res, "driver.connectionCls does not match!")
def test_connection_class_host(self):
host = self.driver.connectionCls.host
self.assertEqual(host, self.default_host)
def test_valid_regions(self):
for region, hostname in DO_SPACES_HOSTS_BY_REGION.items():
driver = self.driver_type(*self.driver_args, region=region)
self.assertEqual(driver.connectionCls.host, hostname)
self.assertTrue(driver.connectionCls.host.startswith(region))
class DigitalOceanSpacesDoubleInstanceTests(LibcloudTestCase):
driver_type = DigitalOceanSpacesStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "nyc3.digitaloceanspaces.com"
alt_host = "ams3.digitaloceanspaces.com"
def setUp(self):
self.driver_v2 = self.driver_type(*self.driver_args, signature_version="2")
self.driver_v4 = self.driver_type(*self.driver_args, signature_version="4", region="ams3")
def test_connection_class_type(self):
res = self.driver_v2.connectionCls is DOSpacesConnectionAWS2
self.assertTrue(res, "driver.connectionCls does not match!")
res = self.driver_v4.connectionCls is DOSpacesConnectionAWS4
self.assertTrue(res, "driver.connectionCls does not match!")
# Verify again that connection class hasn't been overridden when
# instantiating a second driver class
res = self.driver_v2.connectionCls is DOSpacesConnectionAWS2
self.assertTrue(res, "driver.connectionCls does not match!")
def test_connection_class_host(self):
host = self.driver_v2.connectionCls.host
self.assertEqual(host, self.default_host)
host = self.driver_v4.connectionCls.host
self.assertEqual(host, self.alt_host)
if __name__ == "__main__":
sys.exit(unittest.main())
---
blob_id: 7f4da01c743ea3907df701a06c4d71fd3840c0e3
directory_id: 7ebb2f0458d3813737dd045473d7c1398d08392d
path: /pyclesperanto_prototype/_tier0/_array_operators.py
content_id: cb7d95b3fbf1ca36ad4c10157175916ff54f1602
detected_licenses: ["Python-2.0", "BSD-3-Clause"]
license_type: permissive
repo_name: clEsperanto/pyclesperanto_prototype
snapshot_id: b3192d6984f45571fe0a7dfcceee2058bc4debbe
revision_id: b465c8669f8e9326874139cf4b9c9af22c22757c
branch_name: refs/heads/master
visit_date: 2023-09-04T11:07:55.828329
revision_date: 2023-08-25T17:18:30
committer_date: 2023-08-25T17:18:30
github_id: 248,206,619
star_events_count: 152
fork_events_count: 36
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-05-23T09:44:51
gha_created_at: 2020-03-18T10:56:29
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 20,045
extension: py
filename: _array_operators.py
content:
import numpy as np
cl_buffer_datatype_dict = {
bool: "bool",
np.uint8: "uchar",
np.uint16: "ushort",
np.uint32: "uint",
np.uint64: "ulong",
np.int8: "char",
np.int16: "short",
np.int32: "int",
np.int64: "long",
np.float32: "float",
np.complex64: "cfloat_t",
int: "int",
float: "float",
np.float64: "float",
}
_supported_numeric_types = tuple(cl_buffer_datatype_dict.keys())
class ArrayOperators():
@property
def T(self):
from .._tier1 import transpose_xy, transpose_xz
if len(self.shape) == 2:
return transpose_xy(self)
elif len(self.shape) == 3:
return transpose_xz(self)
else:
raise ValueError("Only 2D and 3D arrays supported.")
def min(self, axis=None, out=None):
from .._tier2 import minimum_of_all_pixels
from .._tier1 import minimum_x_projection
from .._tier1 import minimum_y_projection
from .._tier1 import minimum_z_projection
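        # Images here are indexed (z, y, x), so numpy-style axis 0 maps to a Z
        # projection, axis 1 to Y, and axis 2 to X (max() and sum() use the same mapping).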
if axis==0:
result = minimum_z_projection(self)
elif axis==1:
result = minimum_y_projection(self)
elif axis==2:
result = minimum_x_projection(self)
elif axis is None:
result = minimum_of_all_pixels(self)
else:
raise ValueError("Axis " + axis + " not supported")
if out is not None:
np.copyto(out, result.get().astype(out.dtype))
return result
def max(self, axis=None, out=None):
from .._tier2 import maximum_of_all_pixels
from .._tier1 import maximum_x_projection
from .._tier1 import maximum_y_projection
from .._tier1 import maximum_z_projection
if axis==0:
result = maximum_z_projection(self)
elif axis==1:
result = maximum_y_projection(self)
elif axis==2:
result = maximum_x_projection(self)
elif axis is None:
result = maximum_of_all_pixels(self)
else:
raise ValueError("Axis " + axis + " not supported")
if out is not None:
np.copyto(out, result.get().astype(out.dtype))
return result
def sum(self, axis=None, out=None):
from .._tier2 import sum_of_all_pixels
from .._tier1 import sum_x_projection
from .._tier1 import sum_y_projection
from .._tier1 import sum_z_projection
if axis==0:
result = sum_z_projection(self)
elif axis==1:
result = sum_y_projection(self)
elif axis==2:
result = sum_x_projection(self)
elif axis is None:
result = sum_of_all_pixels(self)
else:
raise ValueError("Axis " + axis + " not supported")
if out is not None:
np.copyto(out, result.get().astype(out.dtype))
return result
# TODO: Not sure if the following are necessary / could be circumvented.
# For now tests fail if we remove them.
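    # Common pattern in the operators below: when the right-hand operand is a plain
    # Python/numpy scalar, dispatch to the *_scalar / *_constant kernel; otherwise it
    # is assumed to be an image and the image-image kernel is used. The in-place
    # variants (__iadd__, __idiv__, ...) copy x1 first so the original buffer can be
    # read as an operand while x1 receives the result.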
def __iadd__(x1, x2):
from .._tier1 import copy
temp = copy(x1)
if isinstance(x2, _supported_numeric_types) :
from .._tier1 import add_image_and_scalar
return add_image_and_scalar(temp, x1, scalar=x2)
else:
from .._tier1 import add_images_weighted
return add_images_weighted(temp, x2, x1)
def __sub__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import add_image_and_scalar
return add_image_and_scalar(x1, scalar=-x2)
else:
from .._tier1 import add_images_weighted
return add_images_weighted(x1, x2, factor2=-1)
def __div__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import multiply_image_and_scalar
return multiply_image_and_scalar(x1, scalar=1.0 / x2)
else:
from .._tier1 import divide_images
return divide_images(x1, x2)
def __truediv__(x1, x2):
return x1.__div__(x2)
def __idiv__(x1, x2):
from .._tier1 import copy
temp = copy(x1)
if isinstance(x2, _supported_numeric_types):
from .._tier1 import multiply_image_and_scalar
return multiply_image_and_scalar(temp, x1, scalar=1.0 / x2)
else:
from .._tier1 import divide_images
return divide_images(temp, x2, x1)
def __itruediv__(x1, x2):
return x1.__idiv__(x2)
def __mul__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import multiply_image_and_scalar
return multiply_image_and_scalar(x1, scalar=x2)
else:
from .._tier1 import multiply_images
return multiply_images(x1, x2)
def __imul__(x1, x2):
from .._tier1 import copy
temp = copy(x1)
if isinstance(x2, _supported_numeric_types):
from .._tier1 import multiply_image_and_scalar
return multiply_image_and_scalar(temp, x1, scalar=x2)
else:
from .._tier1 import multiply_images
return multiply_images(temp, x2, x1)
def __gt__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import greater_constant
return greater_constant(x1, constant=x2)
else:
from .._tier1 import greater
return greater(x1, x2)
def __ge__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import greater_or_equal_constant
return greater_or_equal_constant(x1, constant=x2)
else:
from .._tier1 import greater_or_equal
return greater_or_equal(x1, x2)
def __lt__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import smaller_constant
return smaller_constant(x1, constant=x2)
else:
from .._tier1 import smaller
return smaller(x1, x2)
def __le__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import smaller_or_equal_constant
return smaller_or_equal_constant(x1, constant=x2)
else:
from .._tier1 import smaller_or_equal
return smaller_or_equal(x1, x2)
def __eq__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import equal_constant
return equal_constant(x1, constant=x2)
else:
from .._tier1 import equal
return equal(x1, x2)
def __ne__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import not_equal_constant
return not_equal_constant(x1, constant=x2)
else:
from .._tier1 import not_equal
return not_equal(x1, x2)
def __pow__(x1, x2):
if isinstance(x2, _supported_numeric_types):
from .._tier1 import power
return power(x1, exponent=x2)
else:
from .._tier1 import power_images
return power_images(x1, x2)
def __ipow__(x1, x2):
from .._tier1 import copy
temp = copy(x1)
if isinstance(x2, _supported_numeric_types):
from .._tier1 import power
return power(temp, x1, exponent=x2)
else:
from .._tier1 import power_images
return power_images(temp, x2, x1)
def __setitem__(self, index, value):
if isinstance(index, list):
index = tuple(index)
if isinstance(index, (tuple, np.ndarray)) and index[0] is not None and isinstance(index[0], (tuple, list, np.ndarray)):
if len(index) == len(self.shape):
if len(index[0]) > 0:
# switch xy in 2D / xz in 3D, because clesperanto expects an X-Y-Z array;
# see also https://github.com/clEsperanto/pyclesperanto_prototype/issues/49
index = list(index)
index[0], index[-1] = index[-1], index[0]
# send coordinates to GPU
from ._push import push
coordinates = push(np.asarray(index))
num_coordinates = coordinates.shape[-1]
if isinstance(value, (int, float)):
# make an array containing new values for every pixel
number = value
from ._create import create
value = create((1, 1, num_coordinates))
from .._tier1 import set
set(value, number)
# overwrite pixels
from .._tier1 import write_values_to_positions
from .._tier2 import combine_vertically
values_and_positions = combine_vertically(coordinates, value)
write_values_to_positions(values_and_positions, self)
return
return super().__setitem__(index, value)
def __getitem__(self, index):
result = None
if isinstance(index, slice):
index = (index,)
if isinstance(index, list):
index = tuple(index)
if isinstance(index, (tuple, np.ndarray)) and index[0] is not None and isinstance(index[0], (tuple, list, np.ndarray)):
if len(index) == len(self.shape):
if len(index[0]) > 0:
# switch xy in 2D / xz in 3D, because clesperanto expects an X-Y-Z array;
# see also https://github.com/clEsperanto/pyclesperanto_prototype/issues/49
index = list(index)
index[0], index[-1] = index[-1], index[0]
# send coordinates to GPU
from ._push import push
coordinates = push(np.asarray(index))
# read values from positions
from .._tier1 import read_intensities_from_positions
result = read_intensities_from_positions(coordinates, self)
else:
return []
if result is None:
if isinstance(index, tuple):
if any(x is Ellipsis for x in index):
# handle img[1, ..., 1] or img[1, ...]
new_index = []
for x in index:
if x is Ellipsis:
for i in range(len(self.shape) - len(index) + 1):
new_index.append(slice(None, None, None))
else:
new_index.append(x)
index = tuple(new_index)
if any(isinstance(x, slice) for x in index):
if len(self.shape) > 2: # 3D image
if len(index) > 2:
x_range = index[2]
else:
x_range = slice(None, None, None)
if len(index) > 1:
y_range = index[1]
else:
y_range = slice(None, None, None)
if len(index) > 0:
z_range = index[0]
else:
z_range = slice(None, None, None)
else:
if len(index) > 1:
x_range = index[1]
else:
x_range = slice(None, None, None)
if len(index) > 0:
y_range = index[0]
else:
y_range = slice(None, None, None)
z_range = slice(None, None, None)
if x_range is None:
x_range = slice(None, None, None)
if y_range is None:
y_range = slice(None, None, None)
if z_range is None:
z_range = slice(None, None, None)
eliminate_x = False
eliminate_y = False
eliminate_z = False
if not isinstance(x_range, slice) and np.issubdtype(type(x_range), np.integer):
x_range = slice(x_range, x_range + 1, 1)
eliminate_x = True
if not isinstance(y_range, slice) and np.issubdtype(type(y_range), np.integer):
y_range = slice(y_range, y_range + 1, 1)
eliminate_y = True
if not isinstance(z_range, slice) and np.issubdtype(type(z_range), np.integer):
z_range = slice(z_range, z_range + 1, 1)
eliminate_z = True
from .._tier1 import range as arange
result = arange(self, start_x=x_range.start, stop_x=x_range.stop, step_x=x_range.step,
start_y=y_range.start, stop_y=y_range.stop, step_y=y_range.step,
start_z=z_range.start, stop_z=z_range.stop, step_z=z_range.step)
if (eliminate_x * 1) + (eliminate_y * 1) + (eliminate_z * 1) <= 1:
from .._tier0 import create
from .._tier1 import copy_slice, copy_vertical_slice, copy_horizontal_slice, copy
if eliminate_x:
output = create(result.shape[:2], self.dtype)
result = copy_vertical_slice(result, output)
if eliminate_y:
output = create((result.shape[0],result.shape[2]), self.dtype)
result = copy_horizontal_slice(result, output)
if eliminate_z:
output = create(result.shape[1:], self.dtype)
result = copy_slice(result, output)
else:
from .._tier0 import push, pull
# todo: this is a necessary workaround because we can't handle 1d-arrays in pyclesperanto yet
result = push(pull(self).__getitem__(index))
if result is None:
if hasattr(super(), "__getitem__"):
result = super().__getitem__(index)
else:
result = self.get().__getitem__(index)
if result.size == 1 and isinstance(result, (ArrayOperators)):
result = result.get()
return result
def __iter__(self):
class MyIterator():
def __init__(self, image):
self.image = image
self._iter_index = 0
def __next__(self):
import numpy as np
from .._tier0 import create
from .._tier1 import copy_slice
if not hasattr(self, "_iter_index"):
self._iter_index = 0
if self._iter_index < self.image.shape[0]:
if len(self.image.shape) < 3:
result = np.asarray(self.image)[self._iter_index]
elif len(self.image.shape) == 3:
output = create(self.image.shape[1:])
result = copy_slice(self.image, output, self._iter_index)
else:
raise ValueError("Only 1D, 2D or 3D array are supported.")
self._iter_index = self._iter_index + 1
return result
else:
raise StopIteration
return MyIterator(self)
# adapted from https://github.com/napari/napari/blob/d6bc683b019c4a3a3c6e936526e29bbd59cca2f4/napari/utils/notebook_display.py#L54-L73
def _plt_to_png(self):
"""PNG representation of the image object for IPython.
Returns
-------
In memory binary stream containing a PNG matplotlib image.
"""
import matplotlib.pyplot as plt
from io import BytesIO
with BytesIO() as file_obj:
plt.savefig(file_obj, format='png')
            plt.close()  # suppress plot output
file_obj.seek(0)
png = file_obj.read()
return png
def _png_to_html(self, png):
import base64
url = 'data:image/png;base64,' + base64.b64encode(png).decode('utf-8')
return f'<img src="{url}"></img>'
def _repr_html_(self):
"""HTML representation of the image object for IPython.
Returns
-------
HTML text with the image and some properties.
"""
import numpy as np
import matplotlib.pyplot as plt
from .._tier9 import imshow
size_in_pixels = np.prod(self.shape)
size_in_bytes = size_in_pixels * self.dtype.itemsize
labels = (self.dtype == np.uint32)
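        # uint32 images are treated as label maps: drawn without a colorbar and,
        # further below, without the histogram and min/max table.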
# In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view
if len(self.shape) in (2, 3) and size_in_pixels >= 100:
import matplotlib.pyplot as plt
imshow(self,
labels=labels,
continue_drawing=True,
colorbar=not labels)
image = self._png_to_html(self._plt_to_png())
else:
return "<pre>cle.array(" + str(np.asarray(self)) + ", dtype=" + str(self.dtype) + ")</pre>"
if size_in_bytes > 1024:
size_in_bytes = size_in_bytes / 1024
if size_in_bytes > 1024:
size_in_bytes = size_in_bytes / 1024
if size_in_bytes > 1024:
size_in_bytes = size_in_bytes / 1024
size = "{:.1f}".format(size_in_bytes) + " GB"
else:
size = "{:.1f}".format(size_in_bytes) + " MB"
else:
size = "{:.1f}".format(size_in_bytes) + " kB"
else:
size = "{:.1f}".format(size_in_bytes) + " B"
histogram = ""
if size_in_bytes < 100 * 1024 * 1024:
if not labels:
import numpy as np
from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels
from .._tier3 import histogram
num_bins = 32
h = np.asarray(histogram(self, num_bins=num_bins))
plt.figure(figsize=(1.8, 1.2))
plt.bar(range(0, len(h)), h)
# hide axis text
# https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots
# https://pythonguides.com/matplotlib-remove-tick-labels
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
plt.tick_params(left=False, bottom=False)
histogram = self._png_to_html(self._plt_to_png())
min_max = "<tr><td>min</td><td>" + str(self.min()) + "</td></tr>" + \
"<tr><td>max</td><td>" + str(self.max()) + "</td></tr>"
else:
min_max = ""
all = [
"<table>",
"<tr>",
"<td>",
image,
"</td>",
"<td style=\"text-align: center; vertical-align: top;\">",
"<b><a href=\"https://github.com/clEsperanto/pyclesperanto_prototype\" target=\"_blank\">cle._</a> image</b><br/>",
"<table>",
"<tr><td>shape</td><td>" + str(self.shape).replace(" ", " ") + "</td></tr>",
"<tr><td>dtype</td><td>" + str(self.dtype) + "</td></tr>",
"<tr><td>size</td><td>" + size + "</td></tr>",
min_max,
"</table>",
histogram,
"</td>",
"</tr>",
"</table>",
]
return "\n".join(all)
---
blob_id: ab9de67ab0739dfa2d824f85aa93470dc49828fc
directory_id: 6ffc81125d6bb5f8476f95b2334a27807b8290de
path: /flexneuart/ranker/bm25py.py
content_id: d41873bad0d97717e0a1c8ac8bf044bb7365d5d8
detected_licenses: ["BSD-2-Clause", "Apache-2.0"]
license_type: permissive
repo_name: oaqa/FlexNeuART
snapshot_id: 4cb341ca3c3f94fa28a7cfd4aef5451de3a4a2cb
revision_id: 0bd3e06735ff705731fb6cee62d3486276beccdf
branch_name: refs/heads/master
visit_date: 2023-09-01T00:19:33.980081
revision_date: 2023-05-26T19:19:30
committer_date: 2023-05-26T19:19:30
github_id: 64,071,121
star_events_count: 156
fork_events_count: 21
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-10T01:27:05
gha_created_at: 2016-07-24T15:08:03
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,434
extension: py
filename: bm25py.py
content:
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A Python-level reimplementation of the BM25 ranker. It produces results very
comparable to (though slightly different from) those of the Java BM25 ranker.
You need to call configure_classpath() before using this functionality.
"""
from collections import Counter
from math import log
from flexneuart.text_proc import handle_case
from flexneuart.ranker.base import BaseRanker
from flexneuart.retrieval.fwd_index import get_forward_index
from flexneuart.retrieval.utils import DataEntryFields
from flexneuart.retrieval.cand_provider import CandidateEntry
from typing import List, Union, Tuple, Dict
class BM25Ranker(BaseRanker):
"""
A Python version the BM25 ranker, which can be used in various experiments where
document text is being modified on the fly without updating the document index.
"""
def __init__(self, resource_manager,
query_field_name,
index_field_name,
idf_index_field_name,
text_proc_obj_query=None,
text_proc_obj_doc=None,
keep_case=False,
k1=1.2, b=0.75):
"""Reranker constructor.
:param resource_manager: a resource manager object
:param index_field_name: the name of the text field
        :param idf_index_field_name: the name of the field used to extract IDF values
:param keep_case: do not lower case
:param text_proc_obj_query: a text processing object for the query that would typically
lemmatize/stem text and optionally remove stop words
:param text_proc_obj_doc: a text processing object for the document
:param query_field_name: the name of the query field
        :param k1: BM25 k1 parameter
        :param b: BM25 b parameter
"""
super().__init__()
self.resource_manager = resource_manager
self.k1 = k1
self.b = b
self.text_proc_obj_query = text_proc_obj_query
self.text_proc_obj_doc = text_proc_obj_doc
self.do_lower_case = not keep_case
self.query_field_name = query_field_name
self.fwd_indx = get_forward_index(resource_manager, index_field_name)
self.fwd_parsed_index = get_forward_index(resource_manager, idf_index_field_name)
assert self.fwd_parsed_index.indx.isParsed() or self.fwd_parsed_index.indx.isParsedText()
self.doc_qty = self.fwd_parsed_index.get_doc_qty()
self.inv_avg_doc_len = 1.0 / self.fwd_parsed_index.get_avg_doc_len()
def calc_idf(self, word):
word_entry = self.fwd_parsed_index.indx.getWordEntry(word)
if word_entry is None:
return 0
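        # BM25 IDF with +1 inside the log so very common terms (n close to N)
        # still get a non-negative weight: idf = log(1 + (N - n + 0.5) / (n + 0.5))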
n = word_entry.mWordFreq
return log(1 + (self.doc_qty - n + 0.5) / (n + 0.5))
def handle_case(self, text: str):
return handle_case(self.do_lower_case, text)
def score_candidates(self,
cand_list: List[Union[CandidateEntry, Tuple[str, float]]],
query_info_obj_or_dict: Union[DataEntryFields, dict]) -> Dict[str, float]:
"""Score, but does not rank, a candidate list obtained from the candidate provider.
Note that this function may (though this is ranker-dependent) use all query field fields,
not just a field that was used to retrieve the list of candidate entries!
:param cand_list: a list of the candidate records
:param query_info_obj: a query information object
:return: a dictionary where keys are document IDs and values are document scores
"""
query_text = self.get_query_text(query_info_obj_or_dict)
if self.text_proc_obj_query is not None:
query_text = self.text_proc_obj_query(query_text)
query_text = self.handle_case(query_text)
query_toks = query_text.split()
query_terms_idfs = {w: self.calc_idf(w) for w in set(query_toks)}
res = {}
for doc_id, score in cand_list:
doc_text = self.fwd_indx.get_doc_text(doc_id)
if self.text_proc_obj_doc is not None:
doc_text = self.text_proc_obj_doc(doc_text)
doc_text = self.handle_case(doc_text)
doc_toks = doc_text.split()
doc_len = len(doc_toks)
counts = Counter(doc_toks)
score = 0
for qterm in query_toks:
tf = counts[qterm]
if tf > 0:
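                    # Classic BM25 term weight:
                    #   tf * (k1 + 1) / (tf + k1 * (1 - b + b * dl / avg_dl))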
qidf = query_terms_idfs[qterm]
norm_tf = (tf * (self.k1 + 1)) / \
(tf + self.k1 * (1 - self.b + self.b * doc_len * self.inv_avg_doc_len))
score += qidf * norm_tf
res[doc_id] = score
return res
---
blob_id: 9a9f82ed13e2bad28b3673612606906a9976a696
directory_id: e4816b15dc8b82610676aae3a853eef1cd29a13b
path: /bigsi/__init__.py
content_id: 147ef65ee679b8e7c7d5016cc1d07112fddea7c8
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Phelimb/BIGSI
snapshot_id: eae3f1fc42aac0eb615d791597e66e0ab6f73668
revision_id: bf34abbb9d6f72a9f0c64c40eefc44d810a2502e
branch_name: refs/heads/master
visit_date: 2023-09-03T22:27:18.099341
revision_date: 2021-08-31T17:23:22
committer_date: 2021-08-31T17:23:22
github_id: 66,791,051
star_events_count: 113
fork_events_count: 20
gha_license_id: MIT
gha_event_created_at: 2023-08-24T20:12:06
gha_created_at: 2016-08-28T21:12:24
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 112
extension: py
filename: __init__.py
content:
# from bigsi import version
from bigsi.utils import *
from bigsi.graph.bigsi import BIGSI
# from main import *
---
blob_id: 06225360e29932dbebe4c75cb1bde087c9a080c1
directory_id: dc387b1d0c247aca4d6227a2dcc34c30cd2558fd
path: /loadgen/demos/lon/py_demo_server_lon.py
content_id: bee0b2f8b4a62d152dbb48e1dfecd8e0f4d79e28
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: mlcommons/inference
snapshot_id: 6f0c725fb3a8d65c349bbd60be23e1db7f8dea74
revision_id: c540fcc99eeacfb5c51de8daa0f8cca339f50799
branch_name: refs/heads/master
visit_date: 2023-08-29T11:09:18.621119
revision_date: 2023-08-25T16:22:26
committer_date: 2023-08-25T16:22:26
github_id: 148,566,613
star_events_count: 575
fork_events_count: 253
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-11T11:26:45
gha_created_at: 2018-09-13T01:53:57
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,566
extension: py
filename: py_demo_server_lon.py
content:
# Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Python demo showing how to use the MLPerf Inference LoadGen over the Network bindings.
This program runs on the LON node side.
It runs the demo in MLPerf server mode over the network.
It communicates over the network with a Network SUT node,
which is running the Network SUT demo based on a flask server, implemented in SUT_over_network.py
"""
import threading
import requests
import array
import time
from absl import app
from absl import flags
import mlperf_loadgen
FLAGS = flags.FLAGS
flags.DEFINE_list('sut_server', 'http://localhost:8000',
'Address of the server(s) under test.')
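# Hypothetical invocation, assuming one or more flask SUT demos are already running:
#   python py_demo_server_lon.py --sut_server=http://localhost:8000,http://otherhost:8000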
class QSL:
"""Demo QuerySampleLibrary with dummy features."""
def __init__(self, total_sample_count, performance_sample_count):
self.eval_features = {
i: f"what_is_my_dummy_feature_{i}?" for i in range(total_sample_count)}
self.qsl = mlperf_loadgen.ConstructQSL(
total_sample_count, performance_sample_count, self.load_samples_to_ram, self.unload_samples_from_ram)
def get_features(self, sample_id):
"""Returns the feature for a given sample id."""
return self.eval_features[sample_id]
def load_samples_to_ram(self, query_samples):
"""Loads the features for the given query samples into RAM."""
# Current implementation is not using this functionality.
del query_samples
return
def unload_samples_from_ram(self, query_samples):
"""Unloads the features for the given query samples from RAM."""
# Current implementation is not using this functionality.
del query_samples
return
def __del__(self):
mlperf_loadgen.DestroyQSL(self.qsl)
class QDL:
"""QDL acting as a proxy to the SUT.
This QDL communicates with the SUT via HTTP.
It uses two endpoints to communicate with the SUT:
- /predict/ : Send a query to the SUT and get a response.
- /getname/ : Get the name of the SUT. Send a getname to the SUT and get a response.
"""
def __init__(self, qsl: QSL, sut_server_addr: list):
"""
Constructor for the QDL.
Args:
qsl: The QSL to use.
sut_server_addr: A list of addresses of the SUT.
"""
self.qsl = qsl
# Construct QDL from the python binding
self.qdl = mlperf_loadgen.ConstructQDL(
self.issue_query, self.flush_queries, self.client_get_name)
self.sut_server_addr = sut_server_addr
self.num_nodes = len(sut_server_addr)
# For round robin between the SUTs:
self.next_sut_id = 0
self.lock = threading.Lock()
def issue_query(self, query_samples):
"""Process the query to send to the SUT"""
threading.Thread(target=self.process_query_async,
args=[query_samples]).start()
def flush_queries(self):
"""Flush the queries. Dummy implementation."""
pass
def process_query_async(self, query_samples):
"""
This function is called by the Loadgen in a separate thread.
It is responsible for
1. Creating a query for the SUT, by reading the features from the QSL.
2. Sending the query to the SUT.
3. Waiting for the response from the SUT.
4. Deserializing the response.
5. Calling mlperf_loadgen.QuerySamplesComplete(query_samples, response)
Args:
query_samples: A list of QuerySample objects.
"""
responses = []
for s in query_samples:
# Overall process:
# QDL builds a real-world query and sends to SUT --> SUT processes --> SUT sends back to QDL
# Read features from the QSL
features = self.qsl.get_features(s.index)
            time.sleep(.001)  # Cap the rate of queries sent to the SUT
# Send the query to SUT in round robin
# Wait for a response
sut_result = self.client_predict(features, s.index)
response_array = array.array('B', sut_result.encode('utf-8'))
bi = response_array.buffer_info()
responses.append(mlperf_loadgen.QuerySampleResponse(
s.id, bi[0], bi[1]))
mlperf_loadgen.QuerySamplesComplete(responses)
def get_sut_id_round_robin(self):
"""Get the SUT id in round robin."""
with self.lock:
res = self.next_sut_id
self.next_sut_id = (self.next_sut_id + 1) % self.num_nodes
return res
def client_predict(self, query, id):
"""Serialize the query, send it to the SUT in round robin, and return the deserialized response."""
url = '{}/predict/'.format(self.sut_server_addr[self.get_sut_id_round_robin()])
        response = requests.post(url, json={'query': query, 'id': id})
return response.json()['result']
def client_get_name(self):
"""Get the name of the SUT from ALL the SUTS."""
if len(self.sut_server_addr) == 1:
return requests.post(f'{self.sut_server_addr[0]}/getname/').json()['name']
sut_names = [requests.post(f'{addr}/getname/').json()['name'] for addr in self.sut_server_addr]
return "Multi-node SUT: " + ', '.join(sut_names)
def __del__(self):
mlperf_loadgen.DestroyQDL(self.qdl)
def main(argv):
del argv
settings = mlperf_loadgen.TestSettings()
settings.scenario = mlperf_loadgen.TestScenario.Server
settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
settings.server_target_qps = 100
settings.server_target_latency_ns = 100000000
settings.min_query_count = 100
settings.min_duration_ms = 10000
# QDL and QSL
qsl = QSL(1024, 128)
qdl = QDL(qsl, sut_server_addr=FLAGS.sut_server)
mlperf_loadgen.StartTest(qdl.qdl, qsl.qsl, settings)
if __name__ == "__main__":
app.run(main)
---
blob_id: f6ec38876a9b115cd1e56eda29003924b786b1ed
directory_id: d6712c5008277098937ac90cfe42533ed9ee55a0
path: /tests/providers/test_ultradns.py
content_id: fdb37d7b257c55fec351c17268dd769002455d5e
detected_licenses: ["MIT"]
license_type: permissive
repo_name: AnalogJ/lexicon
snapshot_id: 50492bccc304c6d703d29382d6d18359a8a171eb
revision_id: 55914350aa28851b0c4df487bc5176b5a4841b88
branch_name: refs/heads/master
visit_date: 2023-08-17T00:52:46.893312
revision_date: 2023-08-16T14:11:46
committer_date: 2023-08-16T14:11:46
github_id: 50,903,853
star_events_count: 1,397
fork_events_count: 402
gha_license_id: MIT
gha_event_created_at: 2023-09-14T14:41:41
gha_created_at: 2016-02-02T07:53:04
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 648
extension: py
filename: test_ultradns.py
content:
"""Integration tests for UltraDNS"""
from unittest import TestCase
from integration_tests import IntegrationTestsV2
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class UltradnsProviderTests(TestCase, IntegrationTestsV2):
"""TestCase for UltraDNS"""
provider_name = "ultradns"
domain = "example-abtest.com"
def _filter_headers(self):
return ["Authorization"]
def _filter_post_data_parameters(self):
return ["username", "password", "accessToken"]
---
blob_id: 79dc85dee81ae5da150effeb7ce15143f4b66bb5
directory_id: fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
path: /ai/modelscope/modelscope/trainers/nlp/space/trainer/intent_trainer.py
content_id: dc6b317b0d198e915f822f64e9f2d1c0fc9c5f69
detected_licenses: ["Apache-2.0", "BSD-3-Clause", "MIT"]
license_type: permissive
repo_name: alldatacenter/alldata
snapshot_id: 7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
revision_id: 8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
branch_name: refs/heads/master
visit_date: 2023-08-05T07:32:25.442740
revision_date: 2023-08-03T13:17:24
committer_date: 2023-08-03T13:17:24
github_id: 213,321,771
star_events_count: 774
fork_events_count: 250
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-06T17:35:32
gha_created_at: 2019-10-07T07:36:18
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 29,570
extension: py
filename: intent_trainer.py
content:
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
import time
from collections import OrderedDict
import json
import numpy as np
import torch
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from modelscope.trainers.nlp.space.metrics.metrics_tracker import \
MetricsTracker
from modelscope.utils.constant import ModelFile
from modelscope.utils.logger import get_logger
class Trainer(object):
def __init__(self,
model,
to_tensor,
config,
reader=None,
logger=None,
lr_scheduler=None,
optimizer=None):
self.model = model
self.to_tensor = to_tensor
self.do_train = config.do_train
self.do_infer = config.do_infer
self.is_decreased_valid_metric = config.Trainer.valid_metric_name[
0] == '-'
self.valid_metric_name = config.Trainer.valid_metric_name[1:]
self.num_epochs = config.Trainer.num_epochs
self.save_dir = config.Trainer.save_dir
self.log_steps = config.Trainer.log_steps
self.valid_steps = config.Trainer.valid_steps
self.save_checkpoint = config.Trainer.save_checkpoint
self.save_summary = config.Trainer.save_summary
self.learning_method = config.Dataset.learning_method
self.weight_decay = config.Model.weight_decay
self.warmup_steps = config.Model.warmup_steps
self.batch_size_label = config.Trainer.batch_size_label
self.batch_size_nolabel = config.Trainer.batch_size_nolabel
self.gpu = config.Trainer.gpu
self.lr = config.Model.lr
self.model = model
self.func_model = self.model.module if self.gpu > 1 else self.model
self.reader = reader
self.tokenizer = reader.tokenizer
self.lr_scheduler = lr_scheduler
self.optimizer = optimizer
self.logger = logger or get_logger()
self.batch_metrics_tracker_label = MetricsTracker()
self.token_metrics_tracker_label = MetricsTracker()
self.batch_metrics_tracker_nolabel = MetricsTracker()
self.token_metrics_tracker_nolabel = MetricsTracker()
self.best_valid_metric = float(
'inf' if self.is_decreased_valid_metric else '-inf')
self.epoch = 0
self.batch_num = 0
def set_optimizers(self, num_training_steps_per_epoch):
"""
Setup the optimizer and the learning rate scheduler.
from transformers.Trainer
parameters from cfg: lr (1e-3); warmup_steps
"""
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'norm.weight']
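        # Parameters whose names contain these substrings are exempt from weight decay.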
optimizer_grouped_parameters = [
{
'params': [
p for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
'weight_decay':
self.weight_decay,
},
{
'params': [
p for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
'weight_decay':
0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.lr)
num_training_steps = num_training_steps_per_epoch * self.num_epochs
num_warmup_steps = self.warmup_steps if self.warmup_steps >= 0 else int(
num_training_steps * 0.1)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps)
# reset optimizer and lr_scheduler
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
# log info
self.logger.info(
f'***** Running training: {self.learning_method} *****')
self.logger.info(' Num Epochs = %d', self.num_epochs)
self.logger.info(
' Num Training steps(one turn in a batch of dialogs) per epoch = %d',
num_training_steps_per_epoch)
self.logger.info(' Batch size for labeled data = %d',
self.batch_size_label)
self.logger.info(' Batch size for unlabeled data = %d',
self.batch_size_nolabel)
self.logger.info(' Total optimization steps = %d', num_training_steps)
self.logger.info(' Total warmup steps = %d', num_warmup_steps)
self.logger.info('************************************')
def train(self,
train_label_iter,
train_nolabel_iter=None,
valid_label_iter=None,
valid_nolabel_iter=None):
# begin training
num_epochs = self.num_epochs - self.epoch
for epoch in range(num_epochs):
self.train_epoch(
train_label_iter=train_label_iter,
train_nolabel_iter=train_nolabel_iter,
valid_label_iter=valid_label_iter,
valid_nolabel_iter=valid_nolabel_iter)
def train_epoch(self, train_label_iter, train_nolabel_iter,
valid_label_iter, valid_nolabel_iter):
"""
Train an epoch.
"""
raise NotImplementedError
def evaluate(self, data_label_iter, data_nolabel_iter, need_save=True):
raise NotImplementedError
def infer(self, data_iter, num_batches=None):
raise NotImplementedError
def save(self, is_best=False):
""" save """
train_state = {
'epoch': self.epoch,
'batch_num': self.batch_num,
'best_valid_metric': self.best_valid_metric,
'optimizer': self.optimizer.state_dict()
}
if self.lr_scheduler is not None:
train_state['lr_scheduler'] = self.lr_scheduler.state_dict()
# Save checkpoint
if self.save_checkpoint:
model_file = os.path.join(self.save_dir,
f'state_epoch_{self.epoch}.model')
torch.save(self.model.state_dict(), model_file)
self.logger.info(f"Saved model state to '{model_file}'")
train_file = os.path.join(self.save_dir,
f'state_epoch_{self.epoch}.train')
torch.save(train_state, train_file)
self.logger.info(f"Saved train state to '{train_file}'")
# Save current best model
if is_best:
best_model_file = os.path.join(self.save_dir,
ModelFile.TORCH_MODEL_BIN_FILE)
torch.save(self.model.state_dict(), best_model_file)
best_train_file = os.path.join(
self.save_dir,
'{}.train'.format(ModelFile.TORCH_MODEL_BIN_FILE))
torch.save(train_state, best_train_file)
self.logger.info(
f"Saved best model state to '{best_model_file}' with new best valid metric "
f'{self.valid_metric_name.upper()}={self.best_valid_metric:.3f}'
)
def load(self):
""" load """
def _load_model_state():
model_state_dict = torch.load(
f'{self.func_model.init_checkpoint}',
map_location=lambda storage, loc: storage)
if 'module.' in list(model_state_dict.keys())[0]:
new_model_state_dict = OrderedDict()
for k, v in model_state_dict.items():
assert k[:7] == 'module.'
new_model_state_dict[k[7:]] = v
model_state_dict = new_model_state_dict
new_model_state_dict = OrderedDict()
parameters = {
name: param
for name, param in self.func_model.named_parameters()
}
for name, param in model_state_dict.items():
if name in parameters:
if param.shape != parameters[name].shape:
assert hasattr(param, 'numpy')
arr = param.numpy()
z = np.random.normal(
scale=self.func_model.initializer_range,
size=parameters[name].shape).astype('float32')
if name == 'embedder.token_embedding.weight':
z[-param.shape[0]:] = arr
print(
                                f'part of parameter({name}) randomly initialized from a normal distribution'
)
else:
if z.shape[0] < param.shape[0]:
z = arr[:z.shape[0]]
                                    print(f'part of parameter({name}) was dropped')
else:
z[:param.shape[0]] = arr
print(
                                f'part of parameter({name}) randomly initialized from a normal distribution'
)
dtype, device = param.dtype, param.device
z = torch.tensor(z, dtype=dtype, device=device)
new_model_state_dict[name] = z
else:
new_model_state_dict[name] = param
else:
                    print(f'parameter({name}) was dropped')
model_state_dict = new_model_state_dict
for name in parameters:
if name not in model_state_dict:
if parameters[name].requires_grad:
                        print(f'parameter({name}) randomly initialized from a normal distribution')
z = np.random.normal(
scale=self.func_model.initializer_range,
size=parameters[name].shape).astype('float32')
dtype, device = parameters[name].dtype, parameters[
name].device
model_state_dict[name] = torch.tensor(
z, dtype=dtype, device=device)
else:
model_state_dict[name] = parameters[name]
self.func_model.load_state_dict(model_state_dict)
self.logger.info(
f"Loaded model state from '{self.func_model.init_checkpoint}.model'"
)
def _load_train_state():
train_file = f'{self.func_model.init_checkpoint}.train'
if os.path.exists(train_file):
train_state_dict = torch.load(
train_file, map_location=lambda storage, loc: storage)
self.epoch = train_state_dict['epoch']
self.best_valid_metric = train_state_dict['best_valid_metric']
if self.optimizer is not None and 'optimizer' in train_state_dict:
self.optimizer.load_state_dict(
train_state_dict['optimizer'])
if self.lr_scheduler is not None and 'lr_scheduler' in train_state_dict:
self.lr_scheduler.load_state_dict(
train_state_dict['lr_scheduler'])
self.logger.info(
f"Loaded train state from '{train_file}' with (epoch-{self.epoch} "
f'best_valid_metric={self.best_valid_metric:.3f})')
else:
self.logger.info('Loaded no train state')
if self.func_model.init_checkpoint is None:
self.logger.info('Loaded no model !!!')
return
if self.do_train:
_load_model_state()
return
if self.do_infer:
_load_model_state()
_load_train_state()
class IntentTrainer(Trainer):
def __init__(self, model, to_tensor, config, reader=None):
super(IntentTrainer, self).__init__(model, to_tensor, config, reader)
self.example = config.Model.example
self.can_norm = config.Trainer.can_norm
def can_normalization(self, y_pred, y_true, ex_data_iter):
# compute ACC
acc_original = np.mean([y_pred.argmax(1) == y_true])
message = 'original acc: %s' % acc_original
# compute uncertainty
k = 3
y_pred_topk = np.sort(y_pred, axis=1)[:, -k:]
y_pred_topk /= y_pred_topk.sum(axis=1, keepdims=True)
y_pred_uncertainty =\
-(y_pred_topk * np.log(y_pred_topk)).sum(1) / np.log(k)
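        # i.e. the entropy of the renormalized top-k probabilities, divided by log(k)
        # so that the uncertainty score lies in [0, 1].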
# choose threshold
# print(np.sort(y_pred_uncertainty)[-100:].tolist())
threshold = 0.7
y_pred_confident = y_pred[y_pred_uncertainty < threshold]
y_pred_unconfident = y_pred[y_pred_uncertainty >= threshold]
y_true_confident = y_true[y_pred_uncertainty < threshold]
y_true_unconfident = y_true[y_pred_uncertainty >= threshold]
# compute ACC again for high and low confidence sets
acc_confident = (y_pred_confident.argmax(1) == y_true_confident).mean() \
if len(y_true_confident) else 0.
acc_unconfident = (y_pred_unconfident.argmax(1) == y_true_unconfident).mean() \
if len(y_true_unconfident) else 0.
message += ' (%s) confident acc: %s' % (len(y_true_confident),
acc_confident)
message += ' (%s) unconfident acc: %s' % (len(y_true_unconfident),
acc_unconfident)
# get prior distribution from training set
prior = np.zeros(self.func_model.num_intent)
for _, (batch, batch_size) in ex_data_iter:
for intent_label in batch['intent_label']:
prior[intent_label] += 1.
prior /= prior.sum()
# revise each sample from the low confidence set, and compute new ACC
right, alpha, iters = 0, 1, 1
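        # For each low-confidence sample: append it to the confident set, then iteratively
        # sharpen (power alpha), normalize each class over samples, reweight by the class
        # prior, and renormalize each sample over classes; the revised last row is the
        # adjusted prediction.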
for i, y in enumerate(y_pred_unconfident):
Y = np.concatenate([y_pred_confident, y[None]], axis=0)
for j in range(iters):
Y = Y**alpha
Y /= Y.mean(axis=0, keepdims=True)
Y *= prior[None]
Y /= Y.sum(axis=1, keepdims=True)
y = Y[-1]
if y.argmax() == y_true_unconfident[i]:
right += 1
# get final ACC
acc_final = \
(acc_confident * len(y_pred_confident) + right) / \
len(y_pred)
if len(y_pred_unconfident):
message += ' new unconfident acc: %s' % (
right / len(y_pred_unconfident))
else:
message += ' no unconfident predictions'
message += ' final acc: %s' % acc_final
return acc_original, acc_final, message
def train_epoch(self, train_label_iter, train_nolabel_iter,
valid_label_iter, valid_nolabel_iter):
"""
Train an epoch.
"""
times = []
self.epoch += 1
self.batch_metrics_tracker_label.clear()
self.token_metrics_tracker_label.clear()
self.batch_metrics_tracker_nolabel.clear()
self.token_metrics_tracker_nolabel.clear()
num_label_batches = len(train_label_iter)
num_nolabel_batches = len(
train_nolabel_iter) if train_nolabel_iter is not None else 0
num_batches = max(num_label_batches, num_nolabel_batches)
train_label_iter_loop = iter(train_label_iter)
train_nolabel_iter_loop = iter(
train_nolabel_iter) if train_nolabel_iter is not None else None
report_for_unlabeled_data = True if train_nolabel_iter is not None else False
for batch_id in range(1, num_batches + 1):
# Do a training iteration
start_time = time.time()
batch_list, batch_size_list, with_label_list, loss_list, metrics_list = [], [], [], [], []
data_file_list = []
# collect batch for labeled data
try:
data_file_label, (
batch_label,
batch_size_label) = next(train_label_iter_loop)
except StopIteration:
train_label_iter_loop = iter(train_label_iter)
data_file_label, (
batch_label,
batch_size_label) = next(train_label_iter_loop)
batch_list.append(batch_label)
batch_size_list.append(batch_size_label)
with_label_list.append(True)
data_file_list.append(data_file_label)
# collect batch for unlabeled data
if train_nolabel_iter is not None:
try:
data_file_nolabel, (
batch_nolabel,
batch_size_nolabel) = next(train_nolabel_iter_loop)
except StopIteration:
train_nolabel_iter_loop = iter(train_nolabel_iter)
data_file_nolabel, (
batch_nolabel,
batch_size_nolabel) = next(train_nolabel_iter_loop)
batch_list.append(batch_nolabel)
batch_size_list.append(batch_size_nolabel)
with_label_list.append(False)
data_file_list.append(data_file_nolabel)
# forward labeled batch and unlabeled batch and collect outputs, respectively
for (batch, batch_size, with_label, data_file) in \
zip(batch_list, batch_size_list, with_label_list, data_file_list):
batch = type(batch)(
map(lambda kv: (kv[0], self.to_tensor(kv[1])),
batch.items()))
if self.example and with_label:
current_dataset = train_label_iter.data_file_to_dataset[
data_file]
example_batch = self.reader.retrieve_examples(
dataset=current_dataset,
labels=batch['intent_label'],
inds=batch['ids'],
task='intent')
example_batch = type(example_batch)(
map(lambda kv: (kv[0], self.to_tensor(kv[1])),
example_batch.items()))
for k, v in example_batch.items():
batch[k] = v
batch['epoch'] = self.epoch
batch['num_steps'] = self.batch_num
metrics = self.model(
batch,
is_training=True,
with_label=with_label,
data_file=data_file)
loss, metrics = self.balance_metrics(
metrics=metrics, batch_size=batch_size)
loss_list.append(loss)
metrics_list.append(metrics)
# combine loss for labeled data and unlabeled data
# TODO change the computation of combined loss of labeled batch and unlabeled batch
loss = loss_list[0] if len(
loss_list) == 1 else loss_list[0] + loss_list[1]
# optimization procedure
self.func_model._optimize(
loss, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler)
elapsed = time.time() - start_time
times.append(elapsed)
self.batch_num += 1
# track metrics and log temporary message
for (batch_size, metrics,
with_label) in zip(batch_size_list, metrics_list,
with_label_list):
self.track_and_log_message(
metrics=metrics,
batch_id=batch_id,
batch_size=batch_size,
num_batches=num_batches,
times=times,
with_label=with_label)
# evaluate
if self.valid_steps > 0 and valid_label_iter is not None and valid_nolabel_iter is not None \
and batch_id % self.valid_steps == 0:
self.evaluate(
data_label_iter=valid_label_iter,
data_nolabel_iter=valid_nolabel_iter)
# compute accuracy for valid dataset
accuracy = self.infer(
data_iter=valid_label_iter, ex_data_iter=train_label_iter)
# report summary message and save checkpoints
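        # accuracy is negated so that checkpointing, which treats a smaller
        # metric as better when `is_decreased_valid_metric` is set, keeps
        # the highest-accuracy model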
self.save_and_log_message(
report_for_unlabeled_data, cur_valid_metric=-accuracy)
def forward(self, batch):
pred = []
with torch.no_grad():
batch = type(batch)(
map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
result = self.model.infer(inputs=batch)
result = {
name: result[name].cpu().detach().numpy()
for name in result
}
intent_probs = result['intent_probs']
if self.can_norm:
pred += [intent_probs]
else:
pred += np.argmax(intent_probs, axis=1).tolist()
return pred
def infer(self, data_iter, num_batches=None, ex_data_iter=None):
"""
Inference interface.
"""
self.logger.info('Generation starts ...')
infer_save_file = os.path.join(self.save_dir,
f'infer_{self.epoch}.result.json')
# Inference
batch_cnt = 0
pred, true = [], []
outputs, labels = [], []
begin_time = time.time()
with torch.no_grad():
if self.example:
for _, (batch, batch_size) in tqdm(
ex_data_iter, desc='Building train memory.'):
batch = type(batch)(
map(lambda kv: (kv[0], self.to_tensor(kv[1])),
batch.items()))
result = self.model.infer(inputs=batch)
result = {
name: result[name].cpu().detach().numpy()
for name in result
}
outputs.append(torch.from_numpy(result['features']))
labels += batch['intent_label'].tolist()
mem = torch.cat(outputs, dim=0)
mem = mem.cuda() if self.func_model.use_gpu else mem
labels = torch.LongTensor(labels).unsqueeze(0)
labels = labels.cuda() if self.func_model.use_gpu else labels
self.logger.info(f'Memory size: {mem.size()}')
for _, (batch, batch_size) in tqdm(data_iter, total=num_batches):
batch = type(batch)(
map(lambda kv: (kv[0], self.to_tensor(kv[1])),
batch.items()))
result = self.model.infer(inputs=batch)
result = {
name: result[name].cpu().detach().numpy()
for name in result
}
if self.example:
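                    # Memory-based inference (kNN-style over the train
                    # memory): softmax of feature dot products gives a weight
                    # per stored example; scatter_add sums those weights by
                    # each example's intent label to form class probabilities.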
features = torch.from_numpy(result['features'])
features = features.cuda(
) if self.func_model.use_gpu else features
probs = torch.softmax(features.mm(mem.t()), dim=-1)
intent_probs = torch.zeros(
probs.size(0), self.func_model.num_intent)
intent_probs = intent_probs.cuda(
) if self.func_model.use_gpu else intent_probs
intent_probs = intent_probs.scatter_add(
-1, labels.repeat(probs.size(0), 1), probs)
intent_probs = intent_probs.cpu().detach().numpy()
else:
intent_probs = result['intent_probs']
if self.can_norm:
pred += [intent_probs]
true += batch['intent_label'].cpu().detach().tolist()
else:
pred += np.argmax(intent_probs, axis=1).tolist()
true += batch['intent_label'].cpu().detach().tolist()
batch_cnt += 1
if batch_cnt == num_batches:
break
if self.can_norm:
true = np.array(true)
pred = np.concatenate(pred, axis=0)
acc_original, acc_final, message = self.can_normalization(
y_pred=pred, y_true=true, ex_data_iter=ex_data_iter)
accuracy = max(acc_original, acc_final)
infer_results = {
'accuracy': accuracy,
'pred_labels': pred.tolist(),
'message': message
}
metrics_message = f'Accuracy: {accuracy} {message}'
else:
accuracy = sum(p == t for p, t in zip(pred, true)) / len(pred)
infer_results = {'accuracy': accuracy, 'pred_labels': pred}
metrics_message = f'Accuracy: {accuracy}'
self.logger.info(f'Saved inference results to {infer_save_file}')
with open(infer_save_file, 'w') as fp:
json.dump(infer_results, fp, indent=2)
message_prefix = f'[Infer][{self.epoch}]'
time_cost = f'TIME-{time.time() - begin_time:.3f}'
message = ' '.join([message_prefix, metrics_message, time_cost])
self.logger.info(message)
return accuracy
def track_and_log_message(self, metrics, batch_id, batch_size, num_batches,
times, with_label):
# track metrics
batch_metrics_tracker = self.batch_metrics_tracker_label if with_label else self.batch_metrics_tracker_nolabel
token_metrics_tracker = self.token_metrics_tracker_label if with_label else self.token_metrics_tracker_nolabel
metrics = {
k: v.cpu().detach().numpy() if isinstance(v, torch.Tensor) else v
for k, v in metrics.items()
}
mlm_num = metrics.pop('mlm_num', 0)
batch_metrics = {k: v for k, v in metrics.items() if 'token' not in k}
token_metrics = {k: v for k, v in metrics.items() if 'token' in k}
batch_metrics_tracker.update(batch_metrics, batch_size)
token_metrics_tracker.update(token_metrics, mlm_num)
# log message
if self.log_steps > 0 and batch_id % self.log_steps == 0:
batch_metrics_message = batch_metrics_tracker.value()
token_metrics_message = token_metrics_tracker.value()
label_prefix = 'Labeled' if with_label else 'Unlabeled'
message_prefix = f'[Train][{self.epoch}][{batch_id}/{num_batches}][{label_prefix}]'
avg_time = f'AVG_Time-{sum(times[-self.log_steps:]) / self.log_steps:.3f}'
message = ' '.join([
message_prefix, batch_metrics_message, token_metrics_message,
avg_time
])
self.logger.info(message)
def save_and_log_message(self,
report_for_unlabeled_data,
cur_valid_metric=None):
# report message
batch_metrics_message = self.batch_metrics_tracker_label.summary()
token_metrics_message = self.token_metrics_tracker_label.summary()
message_prefix = f'[Valid][{self.epoch}][Labeled]'
message = ' '.join(
[message_prefix, batch_metrics_message, token_metrics_message])
self.logger.info(message)
if report_for_unlabeled_data:
batch_metrics_message = self.batch_metrics_tracker_nolabel.summary(
)
token_metrics_message = self.token_metrics_tracker_nolabel.summary(
)
message_prefix = f'[Valid][{self.epoch}][Unlabeled]'
message = ' '.join(
[message_prefix, batch_metrics_message, token_metrics_message])
self.logger.info(message)
# save checkpoints
assert cur_valid_metric is not None
if self.is_decreased_valid_metric:
is_best = cur_valid_metric < self.best_valid_metric
else:
is_best = cur_valid_metric > self.best_valid_metric
if is_best:
self.best_valid_metric = cur_valid_metric
self.save(is_best)
def balance_metrics(self, metrics, batch_size):
if self.gpu > 1:
for metric in metrics:
if metric is not None:
assert len(metric) == self.gpu
intent_loss, mlm, token_mlm, mlm_num, kl, con = metrics
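        # The total loss is assembled additively below:
        #   loss = intent_loss
        #        + mlm_ratio * (token_mlm if token_loss else mlm)  # if MLM on
        #        + kl_ratio * kl                                   # if KL on
        #        + con                                             # if contrastive on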
metrics = {}
intent_loss = torch.mean(intent_loss)
metrics['intent_loss'] = intent_loss
loss = intent_loss
if mlm is not None:
mlm_num = torch.sum(mlm_num)
token_mlm = torch.sum(mlm) * (batch_size / self.gpu) / mlm_num
mlm = torch.mean(mlm)
metrics['mlm_num'] = mlm_num
metrics['token_mlm'] = token_mlm
metrics['mlm'] = mlm
loss = loss + (token_mlm if self.func_model.token_loss else
mlm) * self.func_model.mlm_ratio
if kl is not None:
kl = torch.mean(kl)
metrics['kl'] = kl
loss = loss + kl * self.func_model.kl_ratio
if con is not None:
con = torch.mean(con)
metrics['con'] = con
loss = loss + con
metrics['loss'] = loss
assert 'loss' in metrics
return metrics['loss'], metrics
|
f49451496d86d3b8216eeedf7352fc234f30f75e
|
7a6b4705293709e32a6927ad4f76eb0549f3bea9
|
/orchestra/workflow/directory.py
|
438f9aa7d57e416bb700f22f2782178effde7aeb
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
b12io/orchestra
|
a71941d80d1eeddb73f301da8f601b2c31a4b279
|
ee8a29122a3491feae1e1c2c4699142726ae6c21
|
refs/heads/main
| 2023-08-20T17:46:36.360755
| 2023-06-27T13:32:46
| 2023-06-27T13:32:46
| 42,593,972
| 459
| 66
|
Apache-2.0
| 2023-06-27T13:32:48
| 2015-09-16T14:55:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
directory.py
|
import json
import os
from orchestra.core.errors import WorkflowError
def parse_workflow_directory(workflow_directory):
parsed = {
'versions': [],
}
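    # Expected layout (illustrative):
    #   <workflow_directory>/
    #       workflow.json                 <- workflow manifest
    #       <version_dir>/version.json    <- one per workflow version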
# Verify that the directory exists.
if not os.path.exists(workflow_directory):
raise WorkflowError('Workflow directory does not exist.')
# Look for and parse the workflow manifest.
workflow_files = os.listdir(workflow_directory)
if 'workflow.json' not in workflow_files:
raise WorkflowError('No "workflow.json" manifest file found.')
with open(os.path.join(workflow_directory, 'workflow.json'), 'r') as f:
parsed['workflow'] = json.load(f)
# Look for and parse workflow version subdirectories.
workflow_subdirs = [
os.path.join(workflow_directory, workflow_file)
for workflow_file in workflow_files
if os.path.isdir(os.path.join(workflow_directory, workflow_file))]
for version_directory in workflow_subdirs:
version_files = os.listdir(version_directory)
if 'version.json' not in version_files:
continue # Subdirectory wasn't a workflow version.
with open(os.path.join(version_directory, 'version.json'), 'r') as f:
parsed['versions'].append(json.load(f))
# Complain if the workflow has no versions.
if len(parsed['versions']) == 0:
raise WorkflowError('Workflow directory {} does not contain any '
'versions'.format(workflow_directory))
return parsed
|
4d54c3a1643c77ad041a935e99daad8e775a7fa4
|
9c6fb2c163307c267dc62f4bb14fadb088f199eb
|
/tests/trainers/test_key_checking.py
|
04235416855606ae6aece16e7301b380a8b46120
|
[
"MIT"
] |
permissive
|
KevinMusgrave/pytorch-metric-learning
|
ca67a64bcef29f4f1b1e6b5617ddbb4746bfd170
|
ac607007dc62666f9de850cd5b8e5694ff0da1c2
|
refs/heads/master
| 2023-08-31T10:08:03.878425
| 2023-07-25T15:00:08
| 2023-07-25T15:00:08
| 217,113,590
| 5,552
| 688
|
MIT
| 2023-09-12T08:32:14
| 2019-10-23T17:20:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
test_key_checking.py
|
import copy
import logging
import unittest
import torch
from torchvision import datasets
from pytorch_metric_learning.losses import NTXentLoss, TripletMarginLoss
from pytorch_metric_learning.trainers import (
DeepAdversarialMetricLearning,
MetricLossOnly,
TrainWithClassifier,
TwoStreamMetricLoss,
)
from pytorch_metric_learning.utils import common_functions as c_f
logging.basicConfig()
logging.getLogger(c_f.LOGGER_NAME).setLevel(logging.INFO)
class TestMetricLossOnly(unittest.TestCase):
def test_metric_loss_only(self):
loss_fn = NTXentLoss()
dataset = datasets.FakeData()
model = torch.nn.Identity()
batch_size = 32
for trainer_class in [
MetricLossOnly,
DeepAdversarialMetricLearning,
TrainWithClassifier,
TwoStreamMetricLoss,
]:
model_dict = {"trunk": model}
optimizer_dict = {"trunk_optimizer": None}
loss_fn_dict = {"metric_loss": loss_fn}
lr_scheduler_dict = {"trunk_scheduler_by_iteration": None}
if trainer_class is DeepAdversarialMetricLearning:
model_dict["generator"] = model
loss_fn_dict["synth_loss"] = loss_fn
loss_fn_dict["g_adv_loss"] = TripletMarginLoss()
kwargs = {
"models": model_dict,
"optimizers": optimizer_dict,
"batch_size": batch_size,
"loss_funcs": loss_fn_dict,
"mining_funcs": {},
"dataset": dataset,
"freeze_these": ["trunk"],
"lr_schedulers": lr_scheduler_dict,
}
trainer_class(**kwargs)
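            # Each perturbation below invalidates exactly one kwarg (empty
            # models dict, unknown mining-func key, empty loss dict, bad
            # freeze target, misnamed scheduler key) and must trip the
            # trainer's key-checking assertions.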
for k in [
"models",
"mining_funcs",
"loss_funcs",
"freeze_these",
"lr_schedulers",
]:
new_kwargs = copy.deepcopy(kwargs)
if k == "models":
new_kwargs[k] = {}
if k == "mining_funcs":
new_kwargs[k] = {"dog": None}
if k == "loss_funcs":
if trainer_class is DeepAdversarialMetricLearning:
new_kwargs[k] = {}
else:
continue
if k == "freeze_these":
new_kwargs[k] = ["frog"]
if k == "lr_schedulers":
new_kwargs[k] = {"trunk_scheduler": None}
with self.assertRaises(AssertionError):
trainer_class(**new_kwargs)
if __name__ == "__main__":
unittest.main()
|
cc40fff7aff65967055ef0b0d8a0196674b9379d
|
c6f236361649a4bf56576fcb499ca80b4b00bc7f
|
/tests/my_dict_test.py
|
bfab349b14b1745c495ff2be291aabb3b43128d9
|
[
"MIT"
] |
permissive
|
kaste/mockito-python
|
e3ae81630f3123aed5ef1cbcb247e61a6bf63f9d
|
7483a5f76c7e7fb8121c2dc129bfb147a24e8eca
|
refs/heads/master
| 2022-10-09T04:37:02.227871
| 2022-09-30T21:07:49
| 2022-09-30T21:07:49
| 56,911,241
| 111
| 18
|
MIT
| 2022-08-25T20:46:41
| 2016-04-23T09:23:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
my_dict_test.py
|
import pytest
from mockito.mock_registry import _Dict
from mockito.mocking import Mock
class TestCustomDictLike:
def testAssignKeyValuePair(self):
td = _Dict()
obj = {}
mock = Mock(None)
td[obj] = mock
def testGetValueForKey(self):
td = _Dict()
obj = {}
mock = Mock(None)
td[obj] = mock
assert td.get(obj) == mock
def testReplaceValueForSameKey(self):
td = _Dict()
obj = {}
mock1 = Mock(None)
mock2 = Mock(None)
td[obj] = mock1
td[obj] = mock2
assert td.pop(obj) == mock2
with pytest.raises(KeyError):
td.pop(obj)
def testPopKey(self):
td = _Dict()
obj = {}
mock = Mock(None)
td[obj] = mock
assert td.pop(obj) == mock
assert td.get(obj) is None
def testIterValues(self):
td = _Dict()
obj = {}
mock = Mock(None)
td[obj] = mock
assert td.values() == [mock]
def testClear(self):
td = _Dict()
obj = {}
mock = Mock(None)
td[obj] = mock
td.clear()
assert td.get(obj) is None
|
5d6199d22c23195b4d54323e0efb1b236260a2ec
|
06955f6c7f2a0ce54413fcb9ed10453298e36dcf
|
/fsspec/implementations/webhdfs.py
|
cc595934f9a0161be24d3e300260fb73d4fd9784
|
[
"BSD-3-Clause"
] |
permissive
|
fsspec/filesystem_spec
|
aba07f52331dc380370af326ea79a0f2061816c0
|
212c26f34ada124e3d6ff9b2b520fdadc4903094
|
refs/heads/master
| 2023-09-01T00:02:37.323404
| 2023-08-31T19:14:12
| 2023-08-31T19:14:12
| 130,727,736
| 308
| 135
|
BSD-3-Clause
| 2023-09-14T20:25:47
| 2018-04-23T16:45:08
|
Python
|
UTF-8
|
Python
| false
| false
| 15,391
|
py
|
webhdfs.py
|
# https://hadoop.apache.org/docs/r1.0.4/webhdfs.html
import logging
import os
import secrets
import shutil
import tempfile
import uuid
from contextlib import suppress
from urllib.parse import quote
import requests
from ..spec import AbstractBufferedFile, AbstractFileSystem
from ..utils import infer_storage_options, tokenize
logger = logging.getLogger("webhdfs")
class WebHDFS(AbstractFileSystem):
"""
    Interface to HDFS over HTTP using the WebHDFS API; also supports HttpFS gateways.
Three auth mechanisms are supported:
insecure: no auth is done, and the user is assumed to be whoever they
say they are (parameter ``user``), or a predefined value such as
"dr.who" if not given
spnego: when kerberos authentication is enabled, auth is negotiated by
requests_kerberos https://github.com/requests/requests-kerberos .
This establishes a session based on existing kinit login and/or
specified principal/password; parameters are passed with ``kerb_kwargs``
    token: uses an existing Hadoop delegation token from another secured
        service. This client can also generate such tokens itself when not
        running in insecure mode. Note that tokens expire, but can be renewed
        (by a previously specified user) and may allow for proxying.
"""
tempdir = str(tempfile.gettempdir())
protocol = "webhdfs", "webHDFS"
def __init__(
self,
host,
port=50070,
kerberos=False,
token=None,
user=None,
proxy_to=None,
kerb_kwargs=None,
data_proxy=None,
use_https=False,
**kwargs,
):
"""
Parameters
----------
host: str
Name-node address
port: int
Port for webHDFS
kerberos: bool
Whether to authenticate with kerberos for this connection
token: str or None
If given, use this token on every call to authenticate. A user
and user-proxy may be encoded in the token and should not be also
given
user: str or None
If given, assert the user name to connect with
proxy_to: str or None
If given, the user has the authority to proxy, and this value is
            the user in whose name actions are taken
kerb_kwargs: dict
Any extra arguments for HTTPKerberosAuth, see
`<https://github.com/requests/requests-kerberos/blob/master/requests_kerberos/kerberos_.py>`_
data_proxy: dict, callable or None
If given, map data-node addresses. This can be necessary if the
HDFS cluster is behind a proxy, running on Docker or otherwise has
a mismatch between the host-names given by the name-node and the
address by which to refer to them from the client. If a dict,
maps host names ``host->data_proxy[host]``; if a callable, full
URLs are passed, and function must conform to
``url->data_proxy(url)``.
use_https: bool
Whether to connect to the Name-node using HTTPS instead of HTTP
kwargs
"""
if self._cached:
return
super().__init__(**kwargs)
self.url = "{protocol}://{host}:{port}/webhdfs/v1".format(
protocol="https" if use_https else "http", host=host, port=port
)
self.kerb = kerberos
self.kerb_kwargs = kerb_kwargs or {}
self.pars = {}
self.proxy = data_proxy or {}
if token is not None:
if user is not None or proxy_to is not None:
raise ValueError(
"If passing a delegation token, must not set "
"user or proxy_to, as these are encoded in the"
" token"
)
self.pars["delegation"] = token
if user is not None:
self.pars["user.name"] = user
if proxy_to is not None:
self.pars["doas"] = proxy_to
if kerberos and user is not None:
raise ValueError(
"If using Kerberos auth, do not specify the "
"user, this is handled by kinit."
)
self._connect()
self._fsid = "webhdfs_" + tokenize(host, port)
@property
def fsid(self):
return self._fsid
def _connect(self):
self.session = requests.Session()
if self.kerb:
from requests_kerberos import HTTPKerberosAuth
self.session.auth = HTTPKerberosAuth(**self.kerb_kwargs)
def _call(self, op, method="get", path=None, data=None, redirect=True, **kwargs):
url = self.url + quote(path or "")
args = kwargs.copy()
args.update(self.pars)
args["op"] = op.upper()
logger.debug("sending %s with %s", url, method)
out = self.session.request(
method=method.upper(),
url=url,
params=args,
data=data,
allow_redirects=redirect,
)
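        # WebHDFS reports failures as JSON ``RemoteException`` payloads;
        # translate well-known exception names into builtin Python errors,
        # and let anything unparseable fall through to raise_for_status().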
if out.status_code in [400, 401, 403, 404, 500]:
try:
err = out.json()
msg = err["RemoteException"]["message"]
exp = err["RemoteException"]["exception"]
except (ValueError, KeyError):
pass
else:
if exp in ["IllegalArgumentException", "UnsupportedOperationException"]:
raise ValueError(msg)
elif exp in ["SecurityException", "AccessControlException"]:
raise PermissionError(msg)
elif exp in ["FileNotFoundException"]:
raise FileNotFoundError(msg)
else:
raise RuntimeError(msg)
out.raise_for_status()
return out
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
replication=None,
permissions=None,
**kwargs,
):
"""
Parameters
----------
path: str
File location
mode: str
'rb', 'wb', etc.
block_size: int
Client buffer size for read-ahead or write buffer
autocommit: bool
If False, writes to temporary file that only gets put in final
location upon commit
replication: int
Number of copies of file on the cluster, write mode only
permissions: str or int
posix permissions, write mode only
kwargs
Returns
-------
WebHDFile instance
"""
block_size = block_size or self.blocksize
return WebHDFile(
self,
path,
mode=mode,
block_size=block_size,
tempdir=self.tempdir,
autocommit=autocommit,
replication=replication,
permissions=permissions,
)
@staticmethod
def _process_info(info):
info["type"] = info["type"].lower()
info["size"] = info["length"]
return info
@classmethod
def _strip_protocol(cls, path):
return infer_storage_options(path)["path"]
@staticmethod
def _get_kwargs_from_urls(urlpath):
out = infer_storage_options(urlpath)
out.pop("path", None)
out.pop("protocol", None)
if "username" in out:
out["user"] = out.pop("username")
return out
def info(self, path):
out = self._call("GETFILESTATUS", path=path)
info = out.json()["FileStatus"]
info["name"] = path
return self._process_info(info)
def ls(self, path, detail=False):
out = self._call("LISTSTATUS", path=path)
infos = out.json()["FileStatuses"]["FileStatus"]
for info in infos:
self._process_info(info)
info["name"] = path.rstrip("/") + "/" + info["pathSuffix"]
if detail:
return sorted(infos, key=lambda i: i["name"])
else:
return sorted(info["name"] for info in infos)
def content_summary(self, path):
"""Total numbers of files, directories and bytes under path"""
out = self._call("GETCONTENTSUMMARY", path=path)
return out.json()["ContentSummary"]
def ukey(self, path):
"""Checksum info of file, giving method and result"""
out = self._call("GETFILECHECKSUM", path=path, redirect=False)
if "Location" in out.headers:
location = self._apply_proxy(out.headers["Location"])
out2 = self.session.get(location)
out2.raise_for_status()
return out2.json()["FileChecksum"]
else:
out.raise_for_status()
return out.json()["FileChecksum"]
def home_directory(self):
"""Get user's home directory"""
out = self._call("GETHOMEDIRECTORY")
return out.json()["Path"]
def get_delegation_token(self, renewer=None):
"""Retrieve token which can give the same authority to other uses
Parameters
----------
renewer: str or None
User who may use this token; if None, will be current user
"""
if renewer:
out = self._call("GETDELEGATIONTOKEN", renewer=renewer)
else:
out = self._call("GETDELEGATIONTOKEN")
t = out.json()["Token"]
if t is None:
raise ValueError("No token available for this user/security context")
return t["urlString"]
def renew_delegation_token(self, token):
"""Make token live longer. Returns new expiry time"""
out = self._call("RENEWDELEGATIONTOKEN", method="put", token=token)
return out.json()["long"]
def cancel_delegation_token(self, token):
"""Stop the token from being useful"""
self._call("CANCELDELEGATIONTOKEN", method="put", token=token)
def chmod(self, path, mod):
"""Set the permission at path
Parameters
----------
path: str
location to set (file or directory)
mod: str or int
            posix representation of permission; give as an oct string,
            e.g. '777', or as an int, e.g. 0o777
"""
self._call("SETPERMISSION", method="put", path=path, permission=mod)
def chown(self, path, owner=None, group=None):
"""Change owning user and/or group"""
kwargs = {}
if owner is not None:
kwargs["owner"] = owner
if group is not None:
kwargs["group"] = group
self._call("SETOWNER", method="put", path=path, **kwargs)
def set_replication(self, path, replication):
"""
Set file replication factor
Parameters
----------
path: str
File location (not for directories)
replication: int
Number of copies of file on the cluster. Should be smaller than
number of data nodes; normally 3 on most systems.
"""
self._call("SETREPLICATION", path=path, method="put", replication=replication)
def mkdir(self, path, **kwargs):
self._call("MKDIRS", method="put", path=path)
def makedirs(self, path, exist_ok=False):
if exist_ok is False and self.exists(path):
raise FileExistsError(path)
self.mkdir(path)
def mv(self, path1, path2, **kwargs):
self._call("RENAME", method="put", path=path1, destination=path2)
def rm(self, path, recursive=False, **kwargs):
self._call(
"DELETE",
method="delete",
path=path,
recursive="true" if recursive else "false",
)
def rm_file(self, path, **kwargs):
self.rm(path)
def cp_file(self, lpath, rpath, **kwargs):
with self.open(lpath) as lstream:
tmp_fname = "/".join([self._parent(rpath), f".tmp.{secrets.token_hex(16)}"])
# Perform an atomic copy (stream to a temporary file and
# move it to the actual destination).
try:
with self.open(tmp_fname, "wb") as rstream:
shutil.copyfileobj(lstream, rstream)
self.mv(tmp_fname, rpath)
except BaseException: # noqa
with suppress(FileNotFoundError):
self.rm(tmp_fname)
raise
def _apply_proxy(self, location):
if self.proxy and callable(self.proxy):
location = self.proxy(location)
elif self.proxy:
# as a dict
for k, v in self.proxy.items():
location = location.replace(k, v, 1)
return location
class WebHDFile(AbstractBufferedFile):
"""A file living in HDFS over webHDFS"""
def __init__(self, fs, path, **kwargs):
super().__init__(fs, path, **kwargs)
kwargs = kwargs.copy()
if kwargs.get("permissions", None) is None:
kwargs.pop("permissions", None)
if kwargs.get("replication", None) is None:
kwargs.pop("replication", None)
self.permissions = kwargs.pop("permissions", 511)
tempdir = kwargs.pop("tempdir")
if kwargs.pop("autocommit", False) is False:
self.target = self.path
self.path = os.path.join(tempdir, str(uuid.uuid4()))
def _upload_chunk(self, final=False):
"""Write one part of a multi-block file upload
Parameters
        ----------
final: bool
            This is the last block, so it should complete the file if
            self.autocommit is True.
"""
out = self.fs.session.post(
self.location,
data=self.buffer.getvalue(),
headers={"content-type": "application/octet-stream"},
)
out.raise_for_status()
return True
def _initiate_upload(self):
"""Create remote file/upload"""
kwargs = self.kwargs.copy()
if "a" in self.mode:
op, method = "APPEND", "POST"
else:
op, method = "CREATE", "PUT"
kwargs["overwrite"] = "true"
out = self.fs._call(op, method, self.path, redirect=False, **kwargs)
location = self.fs._apply_proxy(out.headers["Location"])
if "w" in self.mode:
# create empty file to append to
out2 = self.fs.session.put(
location, headers={"content-type": "application/octet-stream"}
)
out2.raise_for_status()
# after creating empty file, change location to append to
out2 = self.fs._call("APPEND", "POST", self.path, redirect=False, **kwargs)
self.location = self.fs._apply_proxy(out2.headers["Location"])
def _fetch_range(self, start, end):
start = max(start, 0)
end = min(self.size, end)
if start >= end or start >= self.size:
return b""
out = self.fs._call(
"OPEN", path=self.path, offset=start, length=end - start, redirect=False
)
out.raise_for_status()
if "Location" in out.headers:
location = out.headers["Location"]
out2 = self.fs.session.get(self.fs._apply_proxy(location))
return out2.content
else:
return out.content
def commit(self):
self.fs.mv(self.path, self.target)
def discard(self):
self.fs.rm(self.path)
|
afa4b070e97f153354edfd14728ee4342df8b8de
|
5f1881006aaf4f3c2515f375ad29c15fd6612de2
|
/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/embeddings/__init__.py
|
d6c86f24920089cc80579f3c1fe2310e4a5199eb
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
microsoft/ContextualSP
|
4edb598d40f683f9a1143b92a9d24e1066d51ec4
|
4198ebce942f4afe7ddca6a96ab6f4464ade4518
|
refs/heads/master
| 2023-08-02T22:08:40.503853
| 2023-07-14T07:22:50
| 2023-07-14T07:22:50
| 255,534,819
| 332
| 70
|
MIT
| 2023-07-25T19:23:48
| 2020-04-14T07:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
__init__.py
|
from pathlib import Path
from .load_glove_embedding import load_glove_embedding
from .load_fasttext_embedding import load_fasttext_embedding
DATA_ROOT = Path(__file__).parent
EMBED_RANK = DATA_ROOT.joinpath('embed_rank.txt')
EMBED_10 = DATA_ROOT.joinpath('embed_10_word2vec.txt')
EMBED_10_GLOVE = DATA_ROOT.joinpath('embed_10_glove.txt')
|
b0d0354ffc263868500becfd50a367a2fc8dcf5a
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCommerceTransportAdStocktaskresultQueryResponse.py
|
fe5cd62d612b8d2834d9ea43b963f18d47517de7
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
AlipayCommerceTransportAdStocktaskresultQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.StockTaskResult import StockTaskResult
class AlipayCommerceTransportAdStocktaskresultQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceTransportAdStocktaskresultQueryResponse, self).__init__()
self._stock_task_result = None
@property
def stock_task_result(self):
return self._stock_task_result
@stock_task_result.setter
def stock_task_result(self, value):
if isinstance(value, StockTaskResult):
self._stock_task_result = value
else:
self._stock_task_result = StockTaskResult.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayCommerceTransportAdStocktaskresultQueryResponse, self).parse_response_content(response_content)
if 'stock_task_result' in response:
self.stock_task_result = response['stock_task_result']
|
38210eae33191f3349f6661779a61e9b241922b5
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/sr_evaluator.py
|
fda66de078bc92e80dd15058412ba323ce56bf87
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 11,741
|
py
|
sr_evaluator.py
|
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import numpy as np
from .base_custom_evaluator import BaseCustomEvaluator
from .base_models import BaseCascadeModel, BaseDLSDKModel, BaseTFModel, BaseOpenVINOModel, create_model
from ...adapters import create_adapter
from ...config import ConfigError
from ...utils import contains_all, extract_image_representations, parse_partial_shape
class SuperResolutionFeedbackEvaluator(BaseCustomEvaluator):
def __init__(self, dataset_config, launcher, model, orig_config):
super().__init__(dataset_config, launcher, orig_config)
self.model = model
if hasattr(self.model, 'adapter'):
self.adapter_type = self.model.adapter.__provider__
@classmethod
def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
model = SRFModel(
config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),
delayed_model_loading
)
return cls(dataset_config, launcher, model, orig_config)
def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):
self.model.init_feedback(self.dataset.data_reader)
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
self.model.fill_feedback(batch_inputs)
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_inputs_extr, _ = extract_image_representations(batch_inputs)
batch_raw_prediction, batch_prediction = self.model.predict(
batch_identifiers, batch_inputs_extr
)
annotation, prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
self.model.feedback(prediction)
metrics_result = self._get_metrics_result(batch_input_ids, annotation, prediction, calculate_metrics)
if output_callback:
output_callback(batch_raw_prediction[0], metrics_result=metrics_result,
element_identifiers=batch_identifiers, dataset_indices=batch_input_ids)
self._update_progress(progress_reporter, metric_config, batch_id, len(prediction), csv_file)
class SRFModel(BaseCascadeModel):
def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):
super().__init__(network_info, launcher)
parts = ['srmodel']
network_info = self.fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading)
if not contains_all(network_info, parts) and not delayed_model_loading:
raise ConfigError('network_info should contain srmodel field')
self._model_mapping = {
'dlsdk': ModelDLSDKModel,
'openvino': ModelOVModel,
'tf': ModelTFModel,
}
self.srmodel = create_model(network_info['srmodel'], launcher, self._model_mapping, 'srmodel',
delayed_model_loading)
self.feedback = self.srmodel.feedback
self.init_feedback = self.srmodel.init_feedback
self.fill_feedback = self.srmodel.fill_feedback
self._part_by_name = {'srmodel': self.srmodel}
self._raw_outs = OrderedDict()
def predict(self, identifiers, input_data):
predictions, raw_outputs = [], []
for data in input_data:
output, prediction = self.srmodel.predict(identifiers, data)
raw_outputs.append(output)
predictions.append(prediction)
return raw_outputs, predictions
def _add_raw_predictions(self, prediction):
for key, output in prediction.items():
if key not in self._raw_outs:
self._raw_outs[key] = []
self._raw_outs[key].append(output)
class FeedbackMixin:
def configure_feedback(self):
self._idx_to_name = {}
self._name_to_idx = {}
self._feedback_name = self.network_info['feedback_input']
self._feedback_data = {self._feedback_name: None}
self._first_step = True
self._inputs = self.network_info['inputs']
self._feedback_inputs = {self._feedback_name: [t for t in self._inputs if t['name'] == self._feedback_name][0]}
for input_info in self._inputs:
idx = int(input_info['value'])
self._idx_to_name[idx] = input_info['name']
self._name_to_idx[input_info['name']] = idx
self._feedback_idx = self._name_to_idx[self._feedback_name]
def init_feedback(self, reader):
info = self._feedback_inputs[self._feedback_name]
self._feedback_data[self._feedback_name] = reader.read(info['initializer'])
def feedback(self, data):
data = data[0]
self._feedback_data[self._feedback_name] = data[0].value
def fill_feedback(self, data):
data[0].data[self._feedback_idx] = self._feedback_data[self._feedback_name]
return data
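# Feedback cycle implemented by FeedbackMixin: init_feedback() seeds the
# feedback input from a reader-provided initializer, fill_feedback() injects
# the stored frame into each incoming batch, and feedback() captures the
# model's latest output for the next iteration.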
class ModelDLSDKModel(BaseDLSDKModel, FeedbackMixin):
def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.partial_shapes = {}
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({key: data.shape for key, data in input_data.items()})
raw_result = self.exec_network.infer(input_data)
result = self.adapter.process([raw_result], identifiers, [{}])
return raw_result, result
def fit_to_input(self, input_data):
has_info = hasattr(self.exec_network, 'input_info')
if has_info:
input_info = self.exec_network.input_info
else:
input_info = self.exec_network.inputs
fitted = {}
for name, info in input_info.items():
data = input_data[self._name_to_idx[name]]
data = np.expand_dims(data, axis=0)
data = np.transpose(data, [0, 3, 1, 2])
if not info.input_data.is_dynamic:
assert tuple(info.input_data.shape) == np.shape(data)
fitted[name] = data
return fitted
def set_input_and_output(self):
has_info = hasattr(self.exec_network, 'input_info')
input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
input_blob = next(iter(input_info))
with_prefix = input_blob.startswith(self.default_model_suffix + '_')
if (with_prefix != self.with_prefix) and with_prefix:
self.network_info['feedback_input'] = '_'.join([self.default_model_suffix,
self.network_info['feedback_input']])
for inp in self.network_info['inputs']:
inp['name'] = '_'.join([self.default_model_suffix, inp['name']])
if 'blob' in inp.keys():
inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']])
self.network_info['adapter']['target_out'] = '_'.join([self.default_model_suffix,
self.network_info['adapter']['target_out']])
self.with_prefix = with_prefix
def load_network(self, network, launcher):
super().load_network(network, launcher)
self.set_input_and_output()
class ModelOVModel(BaseOpenVINOModel, FeedbackMixin):
def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.partial_shapes = {}
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({key: data.shape for key, data in input_data.items()})
raw_result, raw_t_results = self.infer(input_data, raw_results=True)
result = self.adapter.process([raw_result], identifiers, [{}])
return raw_t_results, result
def fit_to_input(self, input_data):
fitted = {}
for name, info in self.inputs.items():
data = input_data[self._name_to_idx[name]]
data = np.expand_dims(data, axis=0)
if parse_partial_shape(info.get_partial_shape())[1] == 3:
data = np.transpose(data, (0, 3, 1, 2))
if not info.get_partial_shape().is_dynamic:
assert tuple(parse_partial_shape(info.get_partial_shape())) == np.shape(data)
fitted[name] = data
return fitted
def load_network(self, network, launcher):
super().load_network(network, launcher)
self.set_input_and_output()
def set_input_and_output(self):
input_info = self.inputs
input_blob = next(iter(input_info))
out_mapping = {}
outputs = self.network.outputs if self.network is not None else self.exec_network.outputs
for out in outputs:
if not out.names:
continue
for name in out.names:
out_mapping[name] = out.get_node().friendly_name
self.adapter.additional_output_mapping = out_mapping
with_prefix = input_blob.startswith(self.default_model_suffix + '_')
if (with_prefix != self.with_prefix) and with_prefix:
self.network_info['feedback_input'] = '_'.join([self.default_model_suffix,
self.network_info['feedback_input']])
for inp in self.network_info['inputs']:
inp['name'] = '_'.join([self.default_model_suffix, inp['name']])
if 'blob' in inp.keys():
inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']])
self.with_prefix = with_prefix
class ModelTFModel(BaseTFModel, FeedbackMixin):
def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
raw_result = self.inference_session.predict([input_data])
result = self.adapter.process(raw_result, identifiers, [{}])
return raw_result, result
def fit_to_input(self, input_data):
fitted = {}
for idx, data in enumerate(input_data):
name = self._idx_to_name[idx]
data = np.expand_dims(data, axis=0)
fitted[name] = data
return fitted
|
1a3c5a56c02301fb027f8a6d0094e5b0c95b32c1
|
cd99fe697ce43e30a64df9cc1df9470d1b0712ab
|
/forge/ethyr/torch/stim.py
|
d8b0f3fd65fc74f1fdb42c128ebb69e667d88e01
|
[
"MIT"
] |
permissive
|
openai/neural-mmo
|
ecde7382aa06123b9951cd78712e5eb1497204aa
|
38fd0310bc784de9b86e5144d0e78f4d31005e6b
|
refs/heads/v1.0
| 2023-08-05T02:48:18.988444
| 2019-09-13T23:46:13
| 2019-09-13T23:46:13
| 173,154,826
| 1,607
| 203
|
MIT
| 2023-07-21T13:04:40
| 2019-02-28T17:17:24
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
stim.py
|
from forge.ethyr.torch import utils as tu
from forge.ethyr import stim
from pdb import set_trace as T
class Stim:
def __init__(self, ent, env, config):
sz = config.STIM
flat = stim.entity(ent, ent, config)
conv, ents = stim.environment(env, ent, sz, config)
self.flat = tu.var(flat)
self.conv = tu.var(conv)
self.ents = tu.var(ents)
|
b3a43edb033fbc19c681f822e51da2f60f8c2942
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/contrib/gunicorn/test_gunicorn.py
|
e3335ca87fd948bdbd8313ceb8431cf21cda87c5
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,261
|
py
|
test_gunicorn.py
|
from contextlib import contextmanager
import json
import os
import subprocess
import sys
import time
from typing import Dict
from typing import NamedTuple
from typing import Optional
import pytest
from ddtrace.internal import compat
from ddtrace.internal.utils.retry import RetryError # noqa
from tests.utils import snapshot_context
from tests.webclient import Client
SERVICE_INTERVAL = 1
GunicornServerSettings = NamedTuple(
"GunicornServerSettings",
[
("env", Dict[str, str]),
("directory", str),
("app_path", str),
("num_workers", str),
("worker_class", str),
("bind", str),
("use_ddtracerun", bool),
("import_auto_in_postworkerinit", bool),
],
)
IMPORT_AUTO = "import ddtrace.auto"
def parse_payload(data):
decoded = data
if sys.version_info[1] == 5:
decoded = data.decode("utf-8")
return json.loads(decoded)
def _gunicorn_settings_factory(
env=None, # type: Dict[str, str]
directory=None, # type: str
app_path="tests.contrib.gunicorn.wsgi_mw_app:app", # type: str
num_workers="4", # type: str
worker_class="sync", # type: str
bind="0.0.0.0:8080", # type: str
use_ddtracerun=True, # type: bool
import_auto_in_postworkerinit=False, # type: bool
import_auto_in_app=None, # type: Optional[bool]
enable_module_cloning=False, # type: bool
debug_mode=False, # type: bool
dd_service=None, # type: Optional[str]
schema_version=None, # type: Optional[str]
rlock=False, # type: bool
):
# type: (...) -> GunicornServerSettings
"""Factory for creating gunicorn settings with simple defaults if settings are not defined."""
if directory is None:
directory = os.getcwd()
if env is None:
env = os.environ.copy()
if import_auto_in_app is not None:
env["_DD_TEST_IMPORT_AUTO"] = str(import_auto_in_app)
env["DD_UNLOAD_MODULES_FROM_SITECUSTOMIZE"] = "1" if enable_module_cloning else "0"
env["DD_REMOTE_CONFIGURATION_ENABLED"] = str(True)
env["DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS"] = str(SERVICE_INTERVAL)
env["DD_PROFILING_UPLOAD_INTERVAL"] = str(SERVICE_INTERVAL)
env["DD_TRACE_DEBUG"] = str(debug_mode)
if dd_service is not None:
env["DD_SERVICE"] = dd_service
if schema_version is not None:
env["DD_TRACE_SPAN_ATTRIBUTE_SCHEMA"] = schema_version
    if rlock:
env["DD_TRACE_SPAN_AGGREGATOR_RLOCK"] = "true"
return GunicornServerSettings(
env=env,
directory=directory,
app_path=app_path,
num_workers=num_workers,
worker_class=worker_class,
bind=bind,
use_ddtracerun=use_ddtracerun,
import_auto_in_postworkerinit=import_auto_in_postworkerinit,
)
def build_config_file(gunicorn_server_settings):
post_worker_init = " {}".format(
IMPORT_AUTO if gunicorn_server_settings.import_auto_in_postworkerinit else "",
)
cfg = """
def post_worker_init(worker):
pass
{post_worker_init}
workers = {num_workers}
worker_class = "{worker_class}"
bind = "{bind}"
""".format(
post_worker_init=post_worker_init,
bind=gunicorn_server_settings.bind,
num_workers=gunicorn_server_settings.num_workers,
worker_class=gunicorn_server_settings.worker_class,
)
return cfg
@contextmanager
def gunicorn_server(gunicorn_server_settings, tmp_path):
cfg_file = tmp_path / "gunicorn.conf.py"
cfg = build_config_file(gunicorn_server_settings)
cfg_file.write_text(compat.stringify(cfg))
cmd = []
if gunicorn_server_settings.use_ddtracerun:
cmd = ["ddtrace-run"]
cmd += ["gunicorn", "--config", str(cfg_file), str(gunicorn_server_settings.app_path)]
print("Running %r with configuration file %s" % (" ".join(cmd), cfg))
gunicorn_server_settings.env["DD_REMOTE_CONFIGURATION_ENABLED"] = "true"
server_process = subprocess.Popen(
cmd,
env=gunicorn_server_settings.env,
cwd=gunicorn_server_settings.directory,
stdout=sys.stdout,
stderr=sys.stderr,
close_fds=True,
preexec_fn=os.setsid,
)
try:
client = Client("http://%s" % gunicorn_server_settings.bind)
try:
print("Waiting for server to start")
client.wait(max_tries=100, delay=0.1)
print("Server started")
except RetryError:
raise TimeoutError("Server failed to start, see stdout and stderr logs")
time.sleep(SERVICE_INTERVAL)
yield server_process, client
try:
client.get_ignored("/shutdown")
except Exception:
pass
finally:
server_process.terminate()
server_process.wait()
SETTINGS_GEVENT_DDTRACERUN_MODULE_CLONE = _gunicorn_settings_factory(worker_class="gevent", enable_module_cloning=True)
SETTINGS_GEVENT_DDTRACERUN = _gunicorn_settings_factory(
worker_class="gevent",
)
SETTINGS_GEVENT_APPIMPORT = _gunicorn_settings_factory(
worker_class="gevent",
use_ddtracerun=False,
import_auto_in_app=True,
)
SETTINGS_GEVENT_POSTWORKERIMPORT = _gunicorn_settings_factory(
worker_class="gevent",
use_ddtracerun=False,
import_auto_in_postworkerinit=True,
)
SETTINGS_GEVENT_DDTRACERUN_DEBUGMODE_MODULE_CLONE = _gunicorn_settings_factory(
worker_class="gevent",
debug_mode=True,
enable_module_cloning=True,
)
SETTINGS_GEVENT_SPANAGGREGATOR_RLOCK = _gunicorn_settings_factory(
worker_class="gevent",
use_ddtracerun=False,
import_auto_in_app=True,
rlock=True,
)
@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10")
def test_no_known_errors_occur(tmp_path):
for gunicorn_server_settings in [
SETTINGS_GEVENT_APPIMPORT,
SETTINGS_GEVENT_POSTWORKERIMPORT,
SETTINGS_GEVENT_DDTRACERUN,
SETTINGS_GEVENT_DDTRACERUN_MODULE_CLONE,
SETTINGS_GEVENT_DDTRACERUN_DEBUGMODE_MODULE_CLONE,
SETTINGS_GEVENT_SPANAGGREGATOR_RLOCK,
]:
with gunicorn_server(gunicorn_server_settings, tmp_path) as context:
_, client = context
response = client.get("/")
assert response.status_code == 200
payload = parse_payload(response.content)
assert payload["profiler"]["is_active"] is True
@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10")
def test_span_schematization(tmp_path):
for schema_version in [None, "v0", "v1"]:
for service_name in [None, "mysvc"]:
gunicorn_settings = _gunicorn_settings_factory(
worker_class="gevent",
dd_service=service_name,
schema_version=schema_version,
)
with snapshot_context(
token="tests.contrib.gunicorn.test_gunicorn.test_span_schematization[{}-{}]".format(
service_name, schema_version
),
ignores=["meta.result_class"],
):
with gunicorn_server(gunicorn_settings, tmp_path) as context:
_, client = context
response = client.get("/")
assert response.status_code == 200
|
05bab4c4a0c3e1667974e0799d68e396e612b87d
|
aac83d1162f41ed6de230ebb8eb9dfa760017443
|
/crawler/tools/legacy/export.py
|
b812a12ab9bc52d3ff5d007ba7755a195ab2648f
|
[
"MIT"
] |
permissive
|
g0v/tw-rental-house-data
|
e4e405f98cc5a38a635ba1e1593a67f23638455f
|
28957d50a47945c75b7138120b6eca5570744b69
|
refs/heads/master
| 2023-08-28T11:29:32.263983
| 2023-08-27T05:26:22
| 2023-08-27T05:26:22
| 133,367,858
| 142
| 30
|
MIT
| 2023-02-08T02:16:37
| 2018-05-14T13:50:37
|
Python
|
UTF-8
|
Python
| false
| false
| 12,187
|
py
|
export.py
|
import sys
import os
import csv
import argparse
import json
from datetime import datetime, timedelta
from django.utils import timezone
from django.core.paginator import Paginator
sys.path.append('{}/..'.format(
os.path.dirname(os.path.realpath(__file__))))
from tools.utils import load_django
load_django()
from tools.json_writer import ListWriter
from rental.models import House, HouseEtc
from rental import enums
vendor_stats = {'_total': 0}
page_size = 3000
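# Schema of each structured_headers entry:
#   en       attribute name looked up on a House instance
#   zh       Chinese column label written to the CSV header
#   field    optional sub-attribute / dict key extracted from the value
#   fn       optional transform applied to the raw value
#   is_enum  optional enum class; adds an extra '<zh>_coding' column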
structured_headers = [
{'en': 'vendor_house_id', 'zh': '物件編號'},
{'en': 'vendor', 'zh': '租屋平台', 'field': 'name'},
{'en': 'vendor_house_url', 'zh': '物件網址'},
{'en': 'created', 'zh': '物件首次發現時間'},
{'en': 'updated', 'zh': '物件最後更新時間'},
{'en': 'top_region', 'zh': '縣市', 'is_enum': enums.TopRegionType},
{'en': 'sub_region', 'zh': '鄉鎮市區', 'is_enum': enums.SubRegionType},
{'en': 'deal_status', 'zh': '房屋出租狀態', 'is_enum': enums.DealStatusType},
{'en': 'deal_time', 'zh': '出租大約時間'},
{'en': 'n_day_deal', 'zh': '出租所費天數'},
{'en': 'monthly_price', 'zh': '月租金'},
{'en': 'deposit_type', 'zh': '押金類型', 'is_enum': enums.DepositType},
{'en': 'n_month_deposit', 'zh': '押金月數'},
{'en': 'deposit', 'zh': '押金金額'},
{'en': 'is_require_management_fee', 'zh': '需要管理費?'},
{'en': 'monthly_management_fee', 'zh': '月管理費'},
{'en': 'has_parking', 'zh': '提供車位?'},
{'en': 'is_require_parking_fee', 'zh': '需要停車費?'},
{'en': 'monthly_parking_fee', 'zh': '月停車費'},
{'en': 'per_ping_price', 'zh': '每坪租金(含管理費與停車費)'},
# {'en': 'detail_dict', 'zh': '建築類型_原始', 'field': 'building_type',
# 'fn': lambda x: x['side_metas'].get('型態', '')},
{'en': 'building_type', 'zh': '建築類型', 'is_enum': enums.BuildingType},
{'en': 'property_type', 'zh': '物件類型', 'is_enum': enums.PropertyType},
{'en': 'is_rooftop', 'zh': '自報頂加?'},
# {'en': 'detail_dict', 'zh': '樓層_原始', 'field': 'raw_floor',
# 'fn': lambda x: x['side_metas'].get('樓層', '')},
{'en': 'floor', 'zh': '所在樓層'},
{'en': 'total_floor', 'zh': '建物樓高'},
{'en': 'dist_to_highest_floor', 'zh': '距頂樓層數'},
# {'en': 'detail_dict', 'zh': '坪數_原始', 'field': 'ping_raw',
# 'fn': lambda x: x['side_metas'].get('坪數', '')},
{'en': 'floor_ping', 'zh': '坪數'},
{'en': 'n_balcony', 'zh': '陽台數'},
{'en': 'n_bath_room', 'zh': '衛浴數'},
{'en': 'n_bed_room', 'zh': '房數'},
{'en': 'n_living_room', 'zh': '客廳數'},
{'en': 'apt_feature_code', 'zh': '格局編碼(陽台/衛浴/房/廳)',
'fn': lambda x: '_{}'.format(x) if x else ''},
# {'en': 'rough_address', 'zh': '約略住址'},
# {'en': 'rough_gps', 'zh': '約略經緯度(未實做)'},
# {'en': 'detail_dict', 'zh': '額外費用_原始', 'field': 'price_includes'},
{'en': 'additional_fee', 'zh': '額外費用_電費?', 'field': 'eletricity'},
{'en': 'additional_fee', 'zh': '額外費用_水費?', 'field': 'water'},
{'en': 'additional_fee', 'zh': '額外費用_瓦斯?', 'field': 'gas'},
{'en': 'additional_fee', 'zh': '額外費用_網路?', 'field': 'internet'},
{'en': 'additional_fee', 'zh': '額外費用_第四台?', 'field': 'cable_tv'},
# {'en': 'detail_dict', 'zh': '生活機能_原始', 'field': 'living_functions',
# 'fn': lambda x: '/'.join(x['environment'].get('生活機能', []))},
{'en': 'living_functions', 'field': 'school', 'zh': '附近有_學校?'},
{'en': 'living_functions', 'field': 'park', 'zh': '附近有_公園?'},
{'en': 'living_functions', 'field': 'dept_store', 'zh': '附近有_百貨公司?'},
{'en': 'living_functions', 'field': 'conv_store', 'zh': '附近有_超商?'},
{'en': 'living_functions', 'field': 'traditional_mkt', 'zh': '附近有_傳統市場?'},
{'en': 'living_functions', 'field': 'night_mkt', 'zh': '附近有_夜市?'},
{'en': 'living_functions', 'field': 'hospital', 'zh': '附近有_醫療機構?'},
# {'en': 'detail_dict', 'zh': '附近交通_原始', 'field': 'transportation',
# 'fn': lambda x: '/'.join(x['environment'].get('附近交通', []))},
{'en': 'transportation', 'field': 'subway', 'zh': '附近的捷運站數'},
{'en': 'transportation', 'field': 'bus', 'zh': '附近的公車站數'},
{'en': 'transportation', 'field': 'train', 'zh': '附近的火車站數'},
{'en': 'transportation', 'field': 'hsr', 'zh': '附近的高鐵站數'},
{'en': 'transportation', 'field': 'public_bike', 'zh': '附近的公共自行車數(實驗中)'},
# {'en': 'detail_dict', 'zh': '身份限制_原始', 'field': 'tenant_restriction',
# 'fn': lambda x: x['top_metas'].get('身份要求', '')},
{'en': 'has_tenant_restriction', 'zh': '有身份限制?'},
{'en': 'has_gender_restriction', 'zh': '有性別限制?'},
{'en': 'gender_restriction', 'zh': '性別限制', 'is_enum': enums.GenderType},
{'en': 'can_cook', 'zh': '可炊?'},
{'en': 'allow_pet', 'zh': '可寵?'},
{'en': 'has_perperty_registration', 'zh': '有產權登記?'},
{'en': 'contact', 'zh': '刊登者類型', 'is_enum': enums.ContactType},
{'en': 'author', 'zh': '刊登者編碼', 'fn': lambda x: str(x.uuid) if hasattr(x, 'uuid') else None},
{'en': 'agent_org', 'zh': '仲介資訊'},
]
facilities = [
'床', '桌子', '椅子', '電視', '熱水器', '冷氣',
'沙發', '洗衣機', '衣櫃', '冰箱', '網路', '第四台', '天然瓦斯'
]
def gen_facility_header(facility):
return {
'en': 'facilities',
'zh': '提供家具_{}?'.format(facility),
'field': facility,
'fn': lambda x: x.get(facility, '')
}
for facility in facilities:
structured_headers.append(gen_facility_header(facility))
def print_header(print_enum=True, file_name='rental_house'):
global structured_headers
zh_csv = open('{}.csv'.format(file_name), 'w')
zh_writer = csv.writer(zh_csv)
zh_csv_header = []
for header in structured_headers:
en = header['en']
if 'field' in header:
en += '_' + header['field']
zh_csv_header.append(header['zh'])
if print_enum and 'is_enum' in header and header['is_enum']:
zh_csv_header.append(header['zh'] + '_coding')
zh_writer.writerow(zh_csv_header)
return zh_writer
def prepare_houses(from_date, to_date):
global page_size
houses = House.objects.filter(
additional_fee__isnull=False,
created__lte=to_date,
crawled_at__gte=from_date
).order_by(
'-id'
)
paginator = Paginator(houses, page_size)
return paginator
def normalize_val(val, header, use_tf):
json_val = val
if 'fn' in header:
val = header['fn'](val)
json_val = val
elif val is not None and 'field' in header:
if hasattr(val, header['field']):
val = getattr(val, header['field'])
json_val = val
elif 'field' in header and header['field'] in val:
val = val[header['field']]
json_val = val
else:
val = ''
json_val = ''
if type(val) is datetime:
val = timezone.localtime(val).strftime('%Y-%m-%d %H:%M:%S %Z')
json_val = val
    if val is None or val == '':
val = '-'
json_val = None
elif val is True:
val = 'T' if use_tf else 1
json_val = True
elif val is False:
val = 'F' if use_tf else 0
json_val = False
return val, json_val
def print_body(writer, houses, print_enum=True, use_tf=False, list_writer=None):
global structured_headers
global vendor_stats
count = 0
for house in houses:
if house.vendor.name not in vendor_stats:
vendor_stats[house.vendor.name] = 0
vendor_stats[house.vendor.name] += 1
vendor_stats['_total'] += 1
row = []
obj = {}
for header in structured_headers:
header_name = header['en']
if not hasattr(house, header_name):
row.append('-')
obj[header['en']] = None
else:
val, json_val = normalize_val(getattr(house, header_name), header, use_tf)
if print_enum:
row.append(val)
obj[header_name] = json_val
if 'is_enum' in header and header['is_enum']:
if val != '-':
row.append(header['is_enum'](val).name)
obj[header_name] = header['is_enum'](val).name
else:
row.append(val)
obj[header_name] = json_val
else:
row.append(val)
obj[header_name] = json_val
writer.writerow(row)
if list_writer:
try:
filename = enums.TopRegionType(house.top_region).name
            except ValueError:
filename = 'default'
list_writer.write(
filename,
obj
)
count += 1
return count
def parse_date(input):
try:
return timezone.make_aware(datetime.strptime(input, '%Y%m%d'))
except ValueError:
raise argparse.ArgumentTypeError('Invalid date string: {}'.format(input))
arg_parser = argparse.ArgumentParser(description='Export house to csv')
arg_parser.add_argument(
'-e',
'--enum',
default=False,
const=True,
nargs='?',
help='print enumeration or not')
arg_parser.add_argument(
'-f',
'--from',
dest='from_date',
default=None,
type=parse_date,
help='from date, format: YYYYMMDD, default today'
)
arg_parser.add_argument(
'-t',
'--to',
dest='to_date',
default=None,
type=parse_date,
help='to date, format: YYYYMMDD, default today'
)
arg_parser.add_argument(
'-o',
'--outfile',
default='rental_house',
help='output file name, without postfix(.csv)'
)
arg_parser.add_argument(
'-j',
'--json',
default=False,
const=True,
nargs='?',
    help='export json or not; each top region will be put in a separate file'
)
arg_parser.add_argument(
'-01',
'--01-instead-of-truefalse',
dest='use_01',
default=False,
const=True,
nargs='?',
    help='use 1/0 to express boolean values in csv, instead of T/F'
)
if __name__ == '__main__':
args = arg_parser.parse_args()
print_enum = args.enum is not False
want_json = args.json is not False
use_tf = args.use_01 is not True
from_date = args.from_date
to_date = args.to_date
if from_date is None:
from_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
if to_date is None:
to_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
if from_date > to_date:
from_date, to_date = to_date, from_date
to_date += timedelta(days=1)
writer = print_header(print_enum, args.outfile)
list_writer = None
if want_json:
list_writer = ListWriter(args.outfile)
print('===== Export all houses from {} to {} ====='.format(from_date, to_date))
paginator = prepare_houses(from_date, to_date)
total = paginator.count
current_done = 0
for page_num in paginator.page_range:
houses = paginator.page(page_num)
        n_rows = print_body(writer, houses, print_enum, use_tf, list_writer)
        current_done += n_rows
print('[{}] we have {}/{} rows'.format(datetime.now(), current_done, total))
if want_json:
list_writer.closeAll()
with open('{}.json'.format(args.outfile), 'w') as file:
json.dump(vendor_stats, file, ensure_ascii=False)
print('===== Export done =====\nData: {}.csv\nStatistics: {}.json\n'.format(args.outfile, args.outfile))
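# Example invocations (illustrative; the script file name and dates are
# assumptions, not taken from the original source):
#
#     python export_houses.py -f 20200101 -t 20200131 -o january_houses
#     python export_houses.py --enum --json -o houses_with_enums
#
# The first exports one month of rows to january_houses.csv; the second also
# writes per-region JSON files plus vendor statistics.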
|
88590916661ef5f570a814467f6bcd9d33ca7ff2
|
469c1754788d8637a74e6306ae3a4e536dba88b0
|
/astroML/density_estimation/xdeconv.py
|
31e2baed6e7608675e486db5174d953ae979817c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
astroML/astroML
|
17bbbfdbd9da06407d727c6e032d23c5a63ec098
|
3ec75de08cddd59577e8c2a79be354c5eaebf9db
|
refs/heads/main
| 2023-08-07T11:54:17.311594
| 2022-12-12T08:52:31
| 2022-12-12T08:52:31
| 6,269,799
| 878
| 300
|
BSD-2-Clause
| 2023-05-09T14:41:47
| 2012-10-17T22:33:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,199
|
py
|
xdeconv.py
|
"""
Extreme deconvolution solver
This follows Bovy et al.
http://arxiv.org/pdf/0905.2979v2.pdf
Arbitrary mixing matrices R are not yet implemented: currently, this only
works with R = I.
"""
from time import time
import numpy as np
from scipy import linalg
try:  # SciPy >= 0.19
    from scipy.special import logsumexp
except ImportError:  # older SciPy
    from scipy.misc import logsumexp
from sklearn.base import BaseEstimator
from sklearn.mixture import GaussianMixture
from sklearn.utils import check_random_state
from ..utils import log_multivariate_gaussian
class XDGMM(BaseEstimator):
"""Extreme Deconvolution
Fit an extreme deconvolution (XD) model to the data
Parameters
----------
n_components: integer
number of gaussian components to fit to the data
max_iter: integer (optional)
number of EM iterations to perform (default=100)
tol: float (optional)
stopping criterion for EM iterations (default=1E-5)
Notes
-----
This implementation follows Bovy et al. arXiv 0905.2979
"""
def __init__(self, n_components, max_iter=100, tol=1E-5, verbose=False,
random_state=None):
self.n_components = n_components
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.random_state = random_state
# model parameters: these are set by the fit() method
self.V = None
self.mu = None
self.alpha = None
def fit(self, X, Xerr, R=None):
"""Fit the XD model to data
Parameters
----------
X: array_like
Input data. shape = (n_samples, n_features)
Xerr: array_like
Error on input data. shape = (n_samples, n_features, n_features)
R : array_like
(TODO: not implemented)
Transformation matrix from underlying to observed data. If
unspecified, then it is assumed to be the identity matrix.
"""
if R is not None:
raise NotImplementedError("mixing matrix R is not yet implemented")
X = np.asarray(X)
Xerr = np.asarray(Xerr)
n_samples, n_features = X.shape
# assume full covariances of data
assert Xerr.shape == (n_samples, n_features, n_features)
# initialize components via a few steps of GaussianMixture
# this doesn't take into account errors, but is a fast first-guess
gmm = GaussianMixture(self.n_components, max_iter=10,
covariance_type='full',
random_state=self.random_state).fit(X)
self.mu = gmm.means_
self.alpha = gmm.weights_
self.V = gmm.covariances_
logL = self.logL(X, Xerr)
for i in range(self.max_iter):
t0 = time()
self._EMstep(X, Xerr)
logL_next = self.logL(X, Xerr)
t1 = time()
if self.verbose:
print("%i: log(L) = %.5g" % (i + 1, logL_next))
print(" (%.2g sec)" % (t1 - t0))
if logL_next < logL + self.tol:
break
logL = logL_next
return self
def logprob_a(self, X, Xerr):
"""
Evaluate the probability for a set of points
Parameters
----------
X: array_like
Input data. shape = (n_samples, n_features)
Xerr: array_like
Error on input data. shape = (n_samples, n_features, n_features)
Returns
-------
p: ndarray
Probabilities. shape = (n_samples,)
"""
X = np.asarray(X)
Xerr = np.asarray(Xerr)
n_samples, n_features = X.shape
# assume full covariances of data
assert Xerr.shape == (n_samples, n_features, n_features)
X = X[:, np.newaxis, :]
Xerr = Xerr[:, np.newaxis, :, :]
T = Xerr + self.V
return log_multivariate_gaussian(X, self.mu, T) + np.log(self.alpha)
def logL(self, X, Xerr):
"""Compute the log-likelihood of data given the model
Parameters
----------
X: array_like
data, shape = (n_samples, n_features)
Xerr: array_like
errors, shape = (n_samples, n_features, n_features)
Returns
-------
logL : float
log-likelihood
"""
return np.sum(logsumexp(self.logprob_a(X, Xerr), -1))
def _EMstep(self, X, Xerr):
"""
Perform the E-step (eq 16 of Bovy et al)
"""
n_samples, n_features = X.shape
X = X[:, np.newaxis, :]
Xerr = Xerr[:, np.newaxis, :, :]
w_m = X - self.mu
T = Xerr + self.V
# ------------------------------------------------------------
# compute inverse of each covariance matrix T
Tshape = T.shape
T = T.reshape([n_samples * self.n_components,
n_features, n_features])
Tinv = np.array([linalg.inv(T[i])
for i in range(T.shape[0])]).reshape(Tshape)
T = T.reshape(Tshape)
# ------------------------------------------------------------
# evaluate each mixture at each point
N = np.exp(log_multivariate_gaussian(X, self.mu, T, Vinv=Tinv))
# ------------------------------------------------------------
# E-step:
# compute q_ij, b_ij, and B_ij
q = (N * self.alpha) / np.dot(N, self.alpha)[:, None]
tmp = np.sum(Tinv * w_m[:, :, np.newaxis, :], -1)
b = self.mu + np.sum(self.V * tmp[:, :, np.newaxis, :], -1)
tmp = np.sum(Tinv[:, :, :, :, np.newaxis]
* self.V[:, np.newaxis, :, :], -2)
B = self.V - np.sum(self.V[:, :, :, np.newaxis]
* tmp[:, :, np.newaxis, :, :], -2)
# ------------------------------------------------------------
# M-step:
# compute alpha, m, V
qj = q.sum(0)
self.alpha = qj / n_samples
self.mu = np.sum(q[:, :, np.newaxis] * b, 0) / qj[:, np.newaxis]
m_b = self.mu - b
tmp = m_b[:, :, np.newaxis, :] * m_b[:, :, :, np.newaxis]
tmp += B
tmp *= q[:, :, np.newaxis, np.newaxis]
self.V = tmp.sum(0) / qj[:, np.newaxis, np.newaxis]
def sample(self, size=1, random_state=None):
if random_state is None:
random_state = self.random_state
rng = check_random_state(random_state) # noqa: F841
shape = tuple(np.atleast_1d(size)) + (self.mu.shape[1],)
npts = np.prod(size) # noqa: F841
        alpha_cs = np.cumsum(self.alpha)
        # NOTE: the draws below use the global np.random state rather than `rng`
        r = np.atleast_1d(np.random.random(size))
        r.sort()
ind = r.searchsorted(alpha_cs)
ind = np.concatenate(([0], ind))
if ind[-1] != size:
ind[-1] = size
draw = np.vstack([np.random.multivariate_normal(self.mu[i],
self.V[i],
(ind[i + 1] - ind[i],))
for i in range(len(self.alpha))])
return draw.reshape(shape)
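# A minimal usage sketch (not part of the original module). Shapes follow the
# fit() docstring; the data here is synthetic:
#
#     import numpy as np
#     X = np.random.normal(size=(500, 2))              # (n_samples, n_features)
#     Xerr = np.tile(0.05 * np.eye(2), (500, 1, 1))    # per-point covariances
#     xdgmm = XDGMM(n_components=3, max_iter=50)
#     xdgmm.fit(X, Xerr)
#     draws = xdgmm.sample(100)                        # shape (100, 2)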
|
4759d3b8b301784b1e0a5b21e1cdc2bd998c0558
|
20d4aac372684501ce6d1b35b8d0beeb20769341
|
/pyart/aux_io/pattern.py
|
fb3e7871fc849d929118e29ac6e4644d0e594887
|
[
"BSD-3-Clause"
] |
permissive
|
ARM-DOE/pyart
|
c108dd28a2d6eb1eaaa414408757feefac9515c3
|
172bbcf1cf3bcdb953c76ebae72c27c95dc2e606
|
refs/heads/main
| 2023-08-18T07:41:28.137014
| 2023-08-17T15:58:24
| 2023-08-17T15:58:24
| 6,248,645
| 455
| 255
|
NOASSERTION
| 2023-09-14T20:09:51
| 2012-10-16T17:51:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,822
|
py
|
pattern.py
|
"""
Routines for reading files from the X-band radar from the PATTERN_ project.
.. _PATTERN: http://www.mi.uni-hamburg.de/PATTERN-Pre.6763.0.html
"""
import datetime
import netCDF4
import numpy as np
from ..config import FileMetadata
from ..core.radar import Radar
from ..io.common import _test_arguments, make_time_unit_str
def read_pattern(filename, **kwargs):
"""
Read a netCDF file from a PATTERN project X-band radar.
Parameters
----------
filename : str
Name of netCDF file to read data from.
Returns
-------
radar : Radar
Radar object.
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata("pattern")
# read the data
ncobj = netCDF4.Dataset(filename)
ncvars = ncobj.variables
# general parameters
nrays = ncvars["Azimuth"].shape[0]
scan_type = "ppi"
# time
# interpolate between the first and last timestamps in the Time variable
time = filemetadata("time")
nctime = ncvars["Time"]
time["units"] = make_time_unit_str(datetime.datetime.utcfromtimestamp(nctime[0]))
time["data"] = np.linspace(0, nctime[-1] - nctime[0], nrays)
# range
_range = filemetadata("range")
_range["data"] = ncvars["Distance"][:]
_range["meters_to_center_of_first_gate"] = _range["data"][0]
# assuming the distance between all gates is constant, may not
# always be true.
_range["meters_between_gates"] = _range["data"][1] - _range["data"][0]
# fields
# files contain a single corrected reflectivity field
fields = {}
field_name = filemetadata.get_field_name("corrected_reflectivity")
field_dic = filemetadata(field_name)
field_dic["_FillValue"] = ncvars["Corrected_Reflectivity"]._FillValue
field_dic["data"] = ncvars["Corrected_Reflectivity"][:]
fields[field_name] = field_dic
# metadata
metadata = filemetadata("metadata")
for k in ["institution", "title", "used_algorithms"]:
if k in ncobj.ncattrs():
metadata[k] = ncobj.getncattr(k)
# latitude, longitude, altitude
latitude = filemetadata("latitude")
longitude = filemetadata("longitude")
altitude = filemetadata("altitude")
latitude["data"] = np.array([ncobj.latitude[:-1]], dtype="float64")
longitude["data"] = np.array([ncobj.longitude[:-1]], dtype="float64")
altitude["data"] = np.array([ncobj.elevation], dtype="float64")
# sweep parameters
# sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
# sweep_end_ray_index
sweep_number = filemetadata("sweep_number")
sweep_mode = filemetadata("sweep_mode")
fixed_angle = filemetadata("fixed_angle")
sweep_start_ray_index = filemetadata("sweep_start_ray_index")
sweep_end_ray_index = filemetadata("sweep_end_ray_index")
sweep_number["data"] = np.arange(1, dtype="int32")
sweep_mode["data"] = np.array(1 * ["azimuth_surveillance"])
fixed_angle["data"] = np.array([0], dtype="float32")
sweep_start_ray_index["data"] = np.array([0], dtype="int32")
sweep_end_ray_index["data"] = np.array([nrays - 1], dtype="int32")
# azimuth, elevation
azimuth = filemetadata("azimuth")
elevation = filemetadata("elevation")
azimuth["data"] = ncvars["Azimuth"][:]
elevation["data"] = np.array([0.0], dtype="float32")
# instrument parameters
instrument_parameters = None
return Radar(
time,
_range,
fields,
metadata,
scan_type,
latitude,
longitude,
altitude,
sweep_number,
sweep_mode,
fixed_angle,
sweep_start_ray_index,
sweep_end_ray_index,
azimuth,
elevation,
instrument_parameters=instrument_parameters,
)
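# A minimal usage sketch (not part of the original module); the file name is
# hypothetical and the field key assumes the default "pattern" metadata config:
#
#     radar = read_pattern("pattern_scan.nc")
#     refl = radar.fields["corrected_reflectivity"]["data"]
#     print(radar.scan_type, refl.shape)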
|
677ca3b5fb5beffd7bceb5db0a95699192737137
|
767c07db1fb131047af3d9b0a065b8fdc8aac9ab
|
/74-sns/sns_facets_multiple.py
|
41b25a5a7b598249e64c6500e00917bd78e42fc8
|
[] |
no_license
|
DUanalytics/pyAnalytics
|
e52c5469da30a5f436ec0f3120d9f15fb82fd9b3
|
107a08bebe46ea51afccfeae4a666213bb405d41
|
refs/heads/master
| 2023-07-08T04:32:54.758902
| 2023-07-03T14:37:04
| 2023-07-03T14:37:04
| 202,094,535
| 394
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,829
|
py
|
sns_facets_multiple.py
|
#Facet PLot
#https://seaborn.pydata.org/tutorial/axis_grids.html
#-----------------------------
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks")
#
sns.FacetGrid?
# FacetGrid(data, row=None, col=None, hue=None, col_wrap=None, sharex=True,
#           sharey=True, height=3, aspect=1, palette=None, row_order=None,
#           col_order=None, hue_order=None, hue_kws=None, dropna=True,
#           legend_out=True, despine=True, margin_titles=False, xlim=None,
#           ylim=None, subplot_kws=None, gridspec_kws=None, size=None)
#%%
tips = sns.load_dataset("tips")
tips.head()
tips.shape
tips.columns
#Histogram - Facet
sns.FacetGrid?
g = sns.FacetGrid(data=tips, row=None, col="time")
plt.figure(figsize=(7, 5))
g = sns.FacetGrid(tips, col="time")
g.map(plt.hist, "tip");
#Facet Scatter
g = sns.FacetGrid(tips, col="sex", hue="smoker")
g.map(plt.scatter, "total_bill", "tip", alpha=.7)
g.add_legend();
#Regression Plot
g = sns.FacetGrid(tips, row="smoker", col="time", margin_titles=True)
g.map(sns.regplot, "size", "total_bill", color=".3", fit_reg=False, x_jitter=.1);
# Barplot
g = sns.FacetGrid(tips, col="day", height=4, aspect=.5)
g.map(sns.barplot, "sex", "total_bill");
# Distribution Plot
ordered_days = tips.day.value_counts().index
g = sns.FacetGrid(tips, row="day", row_order=ordered_days, height=1.7, aspect=4,)
g.map(sns.distplot, "total_bill", hist=False, rug=True);
# scatter
pal = dict(Lunch="seagreen", Dinner="gray")
g = sns.FacetGrid(tips, hue="time", palette=pal, height=5)
g.map(plt.scatter, "total_bill", "tip", s=50, alpha=.7, linewidth=.5, edgecolor="white")
g.add_legend();
# Scatter
g = sns.FacetGrid(tips, hue="sex", palette="Set1", height=5, hue_kws={"marker": ["^", "v"]})
g.map(plt.scatter, "total_bill", "tip", s=100, linewidth=.5, edgecolor="white")
g.add_legend();
# Point Plot
attend = sns.load_dataset("attention").query("subject <= 12")
g = sns.FacetGrid(attend, col="subject", col_wrap=4, height=2, ylim=(0, 10))
g.map(sns.pointplot, "solutions", "score", color=".3", ci=None);
#
with sns.axes_style("white"):
g = sns.FacetGrid(tips, row="sex", col="smoker", margin_titles=True, height=2.5)
g.map(plt.scatter, "total_bill", "tip", color="#334488", edgecolor="white", lw=.5);
g.set_axis_labels("Total bill (US Dollars)", "Tip");
g.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);
g.fig.subplots_adjust(wspace=.02, hspace=.02);
# Scatter
g = sns.FacetGrid(tips, col="smoker", margin_titles=True, height=4)
g.map(plt.scatter, "total_bill", "tip", color="#338844", edgecolor="white", s=50, lw=1)
for ax in g.axes.flat:
ax.plot((0, 50), (0, .2 * 50), c=".2", ls="--")
g.set(xlim=(0, 60), ylim=(0, 14));
# quantile
from scipy import stats
def quantile_plot(x, **kwargs):
qntls, xr = stats.probplot(x, fit=False)
plt.scatter(xr, qntls, **kwargs)
g = sns.FacetGrid(tips, col="sex", height=4)
g.map(quantile_plot, "total_bill");
# qqplot
def qqplot(x, y, **kwargs):
_, xr = stats.probplot(x, fit=False)
_, yr = stats.probplot(y, fit=False)
plt.scatter(xr, yr, **kwargs)
g = sns.FacetGrid(tips, col="smoker", height=4)
g.map(qqplot, "total_bill", "tip");
#qq plot
g = sns.FacetGrid(tips, hue="time", col="sex", height=4)
g.map(qqplot, "total_bill", "tip")
g.add_legend();
#
g = sns.FacetGrid(tips, hue="time", col="sex", height=4, hue_kws={"marker": ["s", "D"]})
g.map(qqplot, "total_bill", "tip", s=40, edgecolor="w")
g.add_legend();
#
def hexbin(x, y, color, **kwargs):
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(x, y, gridsize=15, cmap=cmap, **kwargs)
with sns.axes_style("dark"):
g = sns.FacetGrid(tips, hue="time", col="time", height=4)
g.map(hexbin, "total_bill", "tip", extent=[0, 50, 0, 10]);
#%%
# NOTE: `df` below (with 'Cover_Type' and 'Elevation' columns) is assumed to be
# loaded elsewhere; it is not defined in this script.
cmap = sns.color_palette("Set3")
sns.boxplot(x='Cover_Type', y='Elevation', data=df, palette=cmap);
plt.xticks(rotation=45);
|
ceb592f23e5ebda698f79ca3e0533d5b27f95e76
|
ed80826b0e9f4ae360fa50d33bb82b4abb618f2e
|
/ipydatagrid/datagrid.py
|
556698028291de716d55d8524e6ac96a415106da
|
[
"BSD-3-Clause"
] |
permissive
|
bloomberg/ipydatagrid
|
a2fc2b3b928bb18bcb47be4a95670883be8e1791
|
5720115b158abd286678e35b6c9e9714746573cc
|
refs/heads/main
| 2023-07-25T14:50:19.410607
| 2023-05-22T15:26:46
| 2023-05-22T15:56:06
| 197,740,704
| 422
| 49
|
BSD-3-Clause
| 2023-09-08T18:11:48
| 2019-07-19T09:06:19
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 29,575
|
py
|
datagrid.py
|
# Copyright (c) Bloomberg.
# Distributed under the terms of the Modified BSD License.
import datetime
import decimal
from collections.abc import Iterator
from copy import deepcopy
from math import floor
import numpy as np
import pandas as pd
from ipywidgets import CallbackDispatcher, DOMWidget, widget_serialization
from traitlets import (
Bool,
Dict,
Enum,
Instance,
Int,
List,
Unicode,
default,
validate,
)
from ._frontend import module_name, module_version
from .cellrenderer import CellRenderer, TextRenderer
class SelectionIterator(Iterator):
def __init__(self, selections):
self._rect_index = 0
self._cell_index = 0
self._selections = selections
def __next__(self):
if self._rect_index >= len(self._selections):
raise StopIteration
rect = self._selections[self._rect_index]
row_col = self._index_to_row_col(rect, self._cell_index)
self._cell_index += 1
if row_col is None:
self._rect_index += 1
self._cell_index = 0
return self.__next__()
elif self._cell_in_previous_selected_rects(row_col):
return self.__next__()
else:
return row_col
@staticmethod
def _index_to_row_col(rect, index):
num_rows = rect["r2"] - rect["r1"] + 1
num_cols = rect["c2"] - rect["c1"] + 1
if index > (num_rows * num_cols - 1):
return None
return {
"r": rect["r1"] + floor(index / num_cols),
"c": rect["c1"] + index % num_cols,
}
def _cell_in_previous_selected_rects(self, cell):
return any(
self._cell_in_rect(cell, self._selections[i])
for i in range(0, self._rect_index)
)
@staticmethod
def _cell_in_rect(cell, rect):
return (
rect["r1"] <= cell["r"] <= rect["r2"]
and rect["c1"] <= cell["c"] <= rect["c2"]
)
class SelectionHelper:
"""A Helper Class for processing selections. Provides an iterator
to traverse selected cells.
"""
def __init__(self, data, selections, selection_mode, **kwargs):
super().__init__(**kwargs)
self._data = data
self._selections = selections
self._selection_mode = selection_mode
self._num_columns = -1
self._num_rows = -1
def __iter__(self):
selections = [
self._transform_rect_for_selection_mode(rect)
for rect in self._selections
]
return SelectionIterator(selections)
def __len__(self):
return sum(1 for _ in self)
def all(self):
"""
Returns all selected cells as a list. Each cell is
represented as a dictionary
with keys 'r': row and 'c': column
"""
return list(self)
def all_values(self):
"""
Returns values for all selected cells as a list.
"""
return [
DataGrid._get_cell_value_by_numerical_index(
self._data, cell["c"], cell["r"]
)
for cell in self
]
def _transform_rect_for_selection_mode(self, rect):
selection_mode = self._selection_mode
if selection_mode == "row":
return {
"r1": rect["r1"],
"c1": 0,
"r2": rect["r2"],
"c2": self._get_num_columns() - 1,
}
elif selection_mode == "column":
return {
"r1": 0,
"c1": rect["c1"],
"r2": self._get_num_rows() - 1,
"c2": rect["c2"],
}
else:
return rect
def _get_num_columns(self):
if self._num_columns != -1:
return self._num_columns
data = self._data
primary_keys = (
[]
if "primaryKey" not in data["schema"]
else data["schema"]["primaryKey"]
)
col_headers = [
field["name"]
for field in data["schema"]["fields"]
if field["name"] not in primary_keys
]
self._num_columns = len(col_headers)
return self._num_columns
def _get_num_rows(self):
if self._num_rows != -1:
return self._num_rows
data = self._data
self._num_rows = 0 if "data" not in data else len(data["data"])
return self._num_rows
# modified from ipywidgets original
def _data_to_json(x):
if isinstance(x, dict):
return {str(k): _data_to_json(v) for k, v in x.items()}
if isinstance(x, np.ndarray):
return _data_to_json(x.tolist())
if isinstance(x, (list, tuple)):
return [_data_to_json(v) for v in x]
if isinstance(x, int):
return x
if isinstance(x, float):
if np.isnan(x):
return "$NaN$"
if np.isposinf(x):
return "$Infinity$"
if np.isneginf(x):
return "$NegInfinity$"
return x
if isinstance(x, decimal.Decimal):
return str(x)
if isinstance(x, (datetime.datetime, datetime.date)):
return x.isoformat()
if x is pd.NaT:
return "$NaT$"
if pd.isna(x):
return "$NaN$"
return str(x)
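# Illustrative mappings for _data_to_json (not in the original module):
#
#     _data_to_json(float("nan"))              # -> "$NaN$"
#     _data_to_json(float("inf"))              # -> "$Infinity$"
#     _data_to_json(decimal.Decimal("1.5"))    # -> "1.5"
#     _data_to_json({1: [2.0, None]})          # -> {"1": [2.0, "$NaN$"]}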
_data_serialization = {
"from_json": widget_serialization["from_json"],
"to_json": lambda x, _: _data_to_json(x), # noqa: U101
}
def _widgets_dict_to_json(x, obj):
return {
str(k): widget_serialization["to_json"](v, obj) for k, v in x.items()
}
_widgets_dict_serialization = {
"from_json": widget_serialization["from_json"],
"to_json": _widgets_dict_to_json,
}
class DataGrid(DOMWidget):
"""A Grid Widget with filter, sort and selection capabilities.
Attributes
----------
base_row_size : int (default: 20)
Default row height
base_column_size : int (default: 64)
Default column width
base_row_header_size : int (default: 64)
Default row header width
base_column_header_size : int (default: 20)
Default column header height
header_visibility : {'all', 'row', 'column', 'none'} (default: 'all')
Header visibility mode
'all': both row and column headers visible
'row': only row headers visible
'column': only column headers visible
        'none': neither row nor column headers visible
dataframe : pandas dataframe
Data to display on Data Grid.
renderers : dict
Custom renderers to use for cell rendering. Keys of dictionary specify
column name, and value specifies the renderer
default_renderer : CellRenderer (default: TextRenderer)
Default renderer to use for cell rendering
header_renderer : CellRenderer (default: TextRenderer)
Renderer to use for header cell rendering
corner_renderer : CellRenderer (default: TextRenderer)
Renderer to use for corner header cell rendering
selection_mode : {'row', 'column', 'cell', 'none'} (default: 'none')
Selection mode used when user clicks on grid or makes selections
programmatically.
'row': Selecting a cell will select all the cells on the same row
'column': Selecting a cell will select all the cells on the same column
'cell': Individual cell selection
'none': Selection disabled
selections : list of dict
List of all selections. Selections are represented as rectangular
regions. Rectangles are defined as dictionaries with keys:
'r1': start row, 'c1': start column, 'r2': end row, 'c2': end column.
Start of rectangle is top-left corner and end is bottom-right corner
editable : boolean (default: false)
Boolean indicating whether cell grid can be directly edited
column_widths : Dict of strings to int (default: {})
Dict to specify custom column sizes
The keys (strings) indicate the names of the columns
The values (integers) indicate the widths
auto_fit_columns : Bool (default: True)
Specify whether column width should automatically be
determined by the grid
auto_fit_params : Dict. Specify column auto fit parameters.
Supported parameters:
1) area: where to resize column widths - 'row-header',
'body' or 'all' (default)
2) padding: add padding to resized column widths (15 pixels by default)
3) numCols: cap the number of columns to be resized (None)
grid_style : Dict of {propertyName: string | VegaExpr | Dict}
Dict to specify global grid styles.
The keys (strings) indicate the styling property
The values (css color properties or Vega Expression) indicate the values
See below for all supported styling properties
index_name : str (default: "key")
String to specify the index column name. **Only set when the grid
is constructed and is not an observable traitlet**
Accessors (not observable traitlets)
---------
selected_cells : list of dict
List of selected cells. Each cell is represented as a dictionary
with keys 'r': row and 'c': column
selected_cell_values : list
List of values for all selected cells.
selected_cell_iterator : iterator
An iterator to traverse selected cells one by one.
Supported styling properties:
void_color : color of the area where the grid is not painted
on the canvas
background_color : background color for all body cells
row_background_color : row-wise background color (can take
a string or Vega Expression)
    column_background_color : column-wise background color (can take a
        string or Vega Expression)
grid_line_color : color of both vertical and horizontal grid lines
vertical_grid_line_color : vertical grid line color
horizontal_grid_line_color : horizontal grid line color
header_background_color : background color for all non-body cells
(index and columns)
header_grid_line_color : grid line color for all non-body
cells (index and columns)
header_vertical_grid_line_color : vertical grid line color
for all non-body cells
header_horizontal_grid_line_color : horizontal grid line color
for all non-body cells
selection_fill_color : fill color of selected area
selection_border_color : border color of selected area
header_selection_fill_color : fill color of headers intersecting with
selected area at column or row
header_selection_border_color : border color of headers
intersecting with selected area at column or row
cursor_fill_color : fill color of cursor
cursor_border_color : border color of cursor
    scroll_shadow : Dict of color parameters for scroll shadow (vertical and
        horizontal). Takes four parameters:
size : size of shadow in pixels
color1 : gradient color 1
color2 : gradient color 2
color3 : gradient color 3
"""
_model_name = Unicode("DataGridModel").tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode("DataGridView").tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
base_row_size = Int(20).tag(sync=True)
base_column_size = Int(64).tag(sync=True)
base_row_header_size = Int(64).tag(sync=True)
base_column_header_size = Int(20).tag(sync=True)
header_visibility = Enum(
default_value="all", values=["all", "row", "column", "none"]
).tag(sync=True)
_transforms = List(Dict()).tag(sync=True, **widget_serialization)
_visible_rows = List(Int()).tag(sync=True)
_data = Dict().tag(sync=True, **_data_serialization)
renderers = Dict(Instance(CellRenderer)).tag(
sync=True, **_widgets_dict_serialization
)
default_renderer = Instance(CellRenderer).tag(
sync=True, **widget_serialization
)
header_renderer = Instance(CellRenderer, allow_none=True).tag(
sync=True, **widget_serialization
)
corner_renderer = Instance(CellRenderer, allow_none=True).tag(
sync=True, **widget_serialization
)
selection_mode = Enum(
default_value="none", values=["row", "column", "cell", "none"]
).tag(sync=True)
selections = List(Dict()).tag(sync=True)
editable = Bool(False).tag(sync=True)
column_widths = Dict({}).tag(sync=True, **_data_serialization)
grid_style = Dict(allow_none=True).tag(
sync=True, **_widgets_dict_serialization
)
auto_fit_columns = Bool(False).tag(sync=True)
auto_fit_params = Dict(
{"area": "all", "padding": 30, "numCols": None}, allow_none=False
).tag(sync=True)
def __init__(self, dataframe, index_name=None, **kwargs):
# Setting default index name if not explicitly
# set by the user.
self._index_name = index_name
self.data = dataframe
super().__init__(**kwargs)
self._cell_click_handlers = CallbackDispatcher()
self._cell_change_handlers = CallbackDispatcher()
self.on_msg(self.__handle_custom_msg)
def __handle_custom_msg(self, _, content, buffers): # noqa: U101,U100
if content["event_type"] == "cell-changed":
row = content["row"]
column = self._column_index_to_name(
self._data, content["column_index"]
)
value = content["value"]
# update data on kernel
self._data["data"][row][column] = value
# notify python listeners
self._cell_change_handlers(
{
"row": row,
"column": column,
"column_index": content["column_index"],
"value": value,
}
)
elif content["event_type"] == "cell-click":
# notify python listeners
self._cell_click_handlers(
{
"region": content["region"],
"column": content["column"],
"column_index": content["column_index"],
"row": content["row"],
"primary_key_row": content["primary_key_row"],
"cell_value": content["cell_value"],
}
)
@property
def data(self):
trimmed_primary_key = self._data["schema"]["primaryKey"][:-1]
if self._data["data"]:
df = pd.DataFrame(self._data["data"])
else:
df = pd.DataFrame(
{value["name"]: [] for value in self._data["schema"]["fields"]}
)
final_df = df.set_index(trimmed_primary_key)
final_df = final_df[final_df.columns[:-1]]
return final_df
@staticmethod
def generate_data_object(dataframe, guid_key="ipydguuid", index_name="key"):
dataframe[guid_key] = pd.RangeIndex(0, dataframe.shape[0])
# Renaming default index name from 'index' to 'key' on
# single index DataFrames. This allows users to use
# 'index' as a column name. If 'key' exists, we add _x
# suffix to id, where { x | 0 <= x < inf }
if not isinstance(dataframe.index, pd.MultiIndex):
if index_name in dataframe.columns:
index = 0
new_index_name = f"{index_name}_{index}"
while new_index_name in dataframe.columns:
index += 1
new_index_name = f"{index_name}_{index}"
dataframe = dataframe.rename_axis(new_index_name)
else:
dataframe = dataframe.rename_axis(index_name)
schema = pd.io.json.build_table_schema(dataframe)
reset_index_dataframe = dataframe.reset_index()
data = reset_index_dataframe.to_dict(orient="records")
# Check for multiple primary keys
key = reset_index_dataframe.columns[: dataframe.index.nlevels].tolist()
num_index_levels = len(key) if isinstance(key, list) else 1
# Check for nested columns in schema, if so, we need to update the
# schema to represent the actual column name values
if isinstance(schema["fields"][-1]["name"], tuple):
num_column_levels = len(dataframe.columns.levels)
primary_key = key.copy()
for i in range(num_index_levels):
new_name = [""] * num_column_levels
new_name[0] = schema["fields"][i]["name"]
schema["fields"][i]["name"] = tuple(new_name)
primary_key[i] = tuple(new_name)
schema["primaryKey"] = primary_key
uuid_pk = list(key[-1])
uuid_pk[0] = guid_key
schema["primaryKey"].append(tuple(uuid_pk))
else:
schema["primaryKey"] = key
schema["primaryKey"].append(guid_key)
schema["primaryKeyUuid"] = guid_key
return {
"data": data,
"schema": schema,
"fields": [{field["name"]: None} for field in schema["fields"]],
}
@data.setter
def data(self, dataframe):
# Reference for the original frame column and index names
# This is used to when returning the view data model
self.__dataframe_reference_index_names = dataframe.index.names
self.__dataframe_reference_columns = dataframe.columns
dataframe = dataframe.copy()
# Primary key used
index_key = self.get_dataframe_index(dataframe)
self._data = self.generate_data_object(
dataframe, "ipydguuid", index_key
)
def get_dataframe_index(self, dataframe):
"""Returns a primary key to be used in ipydatagrid's
view of the passed DataFrame"""
# Passed index_name takes highest priority
if self._index_name is not None:
return self._index_name
# Dataframe with names index used by default
if dataframe.index.name is not None:
return dataframe.index.name
# If no index_name param, nor named-index DataFrame
# have been passed, revert to default "key"
return "key"
def get_cell_value(self, column_name, primary_key_value):
"""Gets the value for a single or multiple cells by column name and index name.
Tuples should be used to index into multi-index columns."""
row_indices = self._get_row_index_of_primary_key(primary_key_value)
return [self._data["data"][row][column_name] for row in row_indices]
def set_cell_value(self, column_name, primary_key_value, new_value):
"""Sets the value for a single cell by column name and primary key.
Note: This method returns a boolean to indicate if the operation
was successful.
"""
row_indices = self._get_row_index_of_primary_key(primary_key_value)
# Bail early if key could not be found
if not row_indices:
return False
# Iterate over all indices
outcome = True
for row_index in row_indices:
has_column = column_name in self._data["data"][row_index]
if has_column and row_index is not None:
self._data["data"][row_index][column_name] = new_value
self._notify_cell_change(row_index, column_name, new_value)
else:
outcome = False
return outcome
def get_cell_value_by_index(self, column_name, row_index):
"""Gets the value for a single cell by column name and row index."""
return self._data["data"][row_index][column_name]
def set_cell_value_by_index(self, column_name, row_index, new_value):
"""Sets the value for a single cell by column name and row index.
Note: This method returns a boolean to indicate if the operation
was successful.
"""
has_column = column_name in self._data["data"][row_index]
if has_column and 0 <= row_index < len(self._data["data"]):
self._data["data"][row_index][column_name] = new_value
self._notify_cell_change(row_index, column_name, new_value)
return True
return False
def _notify_cell_change(self, row, column, value):
column_index = self._column_name_to_index(column)
# notify python listeners
self._cell_change_handlers(
{
"row": row,
"column": column,
"column_index": column_index,
"value": value,
}
)
# notify front-end
self.comm.send(
data={
"method": "custom",
"content": {
"event_type": "cell-changed",
"row": row,
"column": column,
"column_index": column_index,
"value": value,
},
}
)
def get_visible_data(self):
"""Returns a dataframe of the current View."""
data = deepcopy(self._data)
if self._visible_rows:
data["data"] = [data["data"][i] for i in self._visible_rows]
at = self._data["schema"]["primaryKey"]
return_df = pd.DataFrame(data["data"]).set_index(at)
return_df.index = return_df.index.droplevel(return_df.index.nlevels - 1)
return_df.index.names = self.__dataframe_reference_index_names
return_df.columns = self.__dataframe_reference_columns
return return_df
def transform(self, transforms):
"""Apply a list of transformation to this DataGrid."""
# TODO: Validate this input, or let it fail on view side?
self._transforms = transforms
def revert(self):
"""Revert all transformations."""
self._transforms = []
@default("default_renderer")
def _default_renderer(self):
return TextRenderer()
def clear_selection(self):
"""Clears all selections."""
self.selections.clear()
self.send_state("selections")
def select(self, row1, column1, row2=None, column2=None, clear_mode="none"):
"""
Select an individual cell or rectangular cell region.
Parameters
----------
row1 : int
Row index for individual cell selection or
start row index for rectangular region selection.
column1 : int
Column index for individual cell selection or
start column index for rectangular region selection.
row2 : int or None, optional (default: None)
End row index for rectangular region selection.
column2 : int or None, optional (default: None)
End column index for rectangular region selection.
clear_mode : string, optional, {'all', 'current', 'none'}
(default: 'none')
Clear mode to use when there are pre-existing selections.
'all' removes all pre-existing selections
'current' removes last pre-existing selection
'none' keeps pre-existing selections
"""
if row2 is None or column2 is None:
row2, column2 = row1, column1
if clear_mode == "all":
self.selections.clear()
elif clear_mode == "current" and len(self.selections) > 0:
self.selections.pop()
self.selections.append(
{
"r1": min(row1, row2),
"c1": min(column1, column2),
"r2": max(row1, row2),
"c2": max(column1, column2),
}
)
self.send_state("selections")
@property
def selected_cells(self):
"""
List of selected cells. Each cell is represented as a dictionary
with keys 'r': row and 'c': column
"""
return SelectionHelper(
self._data, self.selections, self.selection_mode
).all()
@property
def selected_cell_values(self):
"""
List of values for all selected cells.
"""
# Copy of the front-end data model
view_data = self.get_visible_data()
# Get primary key from dataframe
index_key = self.get_dataframe_index(view_data)
        # Serialize to JSON table schema
view_data_object = self.generate_data_object(
view_data, "ipydguuid", index_key
)
return SelectionHelper(
view_data_object, self.selections, self.selection_mode
).all_values()
@property
def selected_cell_iterator(self):
"""
An iterator to traverse selected cells one by one.
"""
return SelectionHelper(self._data, self.selections, self.selection_mode)
@validate("selections")
def _validate_selections(self, proposal):
selections = proposal["value"]
for rectangle in selections:
r1 = min(rectangle["r1"], rectangle["r2"])
c1 = min(rectangle["c1"], rectangle["c2"])
r2 = max(rectangle["r1"], rectangle["r2"])
c2 = max(rectangle["c1"], rectangle["c2"])
rectangle["r1"] = r1
rectangle["c1"] = c1
rectangle["r2"] = r2
rectangle["c2"] = c2
return selections
@validate("editable")
def _validate_editable(self, proposal):
value = proposal["value"]
if value and self.selection_mode == "none":
self.selection_mode = "cell"
return value
@validate("_transforms")
def _validate_transforms(self, proposal):
transforms = proposal["value"]
field_len = len(self._data["schema"]["fields"])
for transform in transforms:
if transform["columnIndex"] > field_len:
raise ValueError("Column index is out of bounds.")
return transforms
@validate("_data")
def _validate_data(self, proposal):
table_schema = proposal["value"]
column_list = [f["name"] for f in table_schema["schema"]["fields"]]
if len(column_list) != len(set(column_list)):
msg = "The dataframe must not contain duplicate column names."
raise ValueError(msg)
return table_schema
def on_cell_change(self, callback, remove=False):
"""Register a callback to execute when a cell value changed.
The callback will be called with one argument, the dictionary
containing cell information with keys
"row", "column", "column_index", "value".
Parameters
----------
remove: bool (optional)
Set to true to remove the callback from the list of callbacks.
"""
self._cell_change_handlers.register_callback(callback, remove=remove)
def on_cell_click(self, callback, remove=False):
"""Register a callback to execute when a cell is clicked.
The callback will be called with one argument, the dictionary
containing cell information with following keys:
"region", "column", "column_index", "row", "primary_key_row",
"cell_value"
Parameters
----------
remove: bool (optional)
Set to true to remove the callback from the list of callbacks.
"""
self._cell_click_handlers.register_callback(callback, remove=remove)
@staticmethod
def _column_index_to_name(data, column_index):
if "schema" not in data or "fields" not in data["schema"]:
return None
col_headers = DataGrid._get_col_headers(data)
return (
None
if len(col_headers) <= column_index
else col_headers[column_index]
)
@staticmethod
def _get_col_headers(data):
primary_keys = (
[]
if "primaryKey" not in data["schema"]
else data["schema"]["primaryKey"]
)
col_headers = [
field["name"]
for field in data["schema"]["fields"]
if field["name"] not in primary_keys
]
return col_headers
def _column_name_to_index(self, column_name):
if "schema" not in self._data or "fields" not in self._data["schema"]:
return None
col_headers = self._get_col_headers(self._data)
try:
return col_headers.index(column_name)
except ValueError:
pass
def _get_row_index_of_primary_key(self, value):
value = value if isinstance(value, list) else [value]
schema = self._data["schema"]
key = schema["primaryKey"][:-1] # Omitting ipydguuid
if len(value) != len(key):
raise ValueError(
"The provided primary key value must be the same length "
"as the primary key."
)
row_indices = [
at
for at, row in enumerate(self._data["data"])
if all(row[key[j]] == value[j] for j in range(len(key)))
]
return row_indices
@staticmethod
def _get_cell_value_by_numerical_index(data, column_index, row_index):
"""Gets the value for a single cell by column index and row index."""
column = DataGrid._column_index_to_name(data, column_index)
if column is None:
return None
return data["data"][row_index][column]
|
4ab445ade110777f073a014fa1dab2c092afe6a2
|
02462a2fdf4ecb06a7d0493ad08e60003775a74b
|
/loompy/view_manager.py
|
e21a38a5c97c2097187055f96f015a3642e05493
|
[
"BSD-2-Clause"
] |
permissive
|
linnarsson-lab/loompy
|
135fe7ffc9124bc47a96f2fdfdd71ed62a7dd8aa
|
07d5ad7bda1b140f05b0d294dcf11de64351acc8
|
refs/heads/master
| 2023-07-19T07:58:19.950741
| 2023-07-10T10:51:27
| 2023-07-10T10:51:27
| 86,575,060
| 121
| 40
|
BSD-2-Clause
| 2022-03-05T08:39:57
| 2017-03-29T11:38:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
view_manager.py
|
import numpy as np
import loompy
from typing import *
class ViewManager:
"""
Create views by slicing an underlying LoomConnection or LoomView
"""
def __init__(self, ds: Any) -> None:
self.ds = ds
def __getitem__(self, slice_: Tuple[Union[slice, np.ndarray, int], Union[slice, np.ndarray, int]]) -> loompy.LoomView:
"""
Create a new view by slicing through the loom file or view
Args:
slice_ (2-tuple of slice, int or np.ndarray): How to slice the file or view
Returns:
A LoomView object, an in-memory representation of the sliced file
"""
if type(slice_) is not tuple or len(slice_) != 2:
raise ValueError("Views require slices along two dimensions")
rows = slice_[0]
cols = slice_[1]
ra = self.ds.ra[rows]
row_graphs = self.ds.row_graphs[rows]
ca = self.ds.ca[cols]
col_graphs = self.ds.col_graphs[cols]
layers = self.ds.layer[rows, cols]
return loompy.LoomView(layers, ra, ca, row_graphs, col_graphs, filename=self.ds.filename, file_attrs=self.ds.attrs)
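# A minimal usage sketch (not part of the original module); `ds.view` is the
# ViewManager attached to a LoomConnection, and the file/attribute names are
# hypothetical:
#
#     with loompy.connect("example.loom") as ds:
#         view = ds.view[:100, ds.ca.ClusterID == 3]
#         print(view.shape)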
|
c41c404b0e7b5e454548bc01828ec8455265bf76
|
e1bc3f6ba4b5be07eacb7447897e04f6bd292f4b
|
/cookiecutter_template/{{cookiecutter.project_name}}/rastervision_{{cookiecutter.project_name}}/rastervision/{{cookiecutter.project_name}}/configs/test.py
|
f4de7b1aa59dbfa75b5ae290513ccbb1431517cb
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
azavea/raster-vision
|
15bcab9cbc88dce7ebcb6dfe741d482018718e40
|
4f0ab5f04769a7eb5e677cbfa73f2391f4ab7d39
|
refs/heads/master
| 2023-08-30T10:19:09.204623
| 2023-08-28T19:46:08
| 2023-08-28T19:46:08
| 80,733,109
| 1,941
| 384
|
NOASSERTION
| 2023-09-14T20:35:16
| 2017-02-02T14:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 253
|
py
|
test.py
|
from rastervision.{{cookiecutter.project_name}}.test_pipeline_config import (
TestPipelineConfig)
def get_config(runner, root_uri='/opt/data/test-pipeline', message='hello world'):
return TestPipelineConfig(root_uri=root_uri, message=message)
|
023833fdc8cb7e6a2e53e41ba310e10bbb62d928
|
9882a8d98429fe0f227b062b0e89da9b881e902c
|
/losses/alignment.py
|
e7d233c334fed2541ceae8cd881dc6005d022f47
|
[
"Apache-2.0"
] |
permissive
|
grib0ed0v/face_recognition.pytorch
|
87306a5b8c7ded2bf61ddaf2166bb868be8e72cc
|
05cb9b30e8220445fcb27988926d88f330091c12
|
refs/heads/develop
| 2020-04-26T04:11:51.213924
| 2019-04-10T11:04:21
| 2019-04-10T11:04:21
| 173,293,569
| 170
| 23
|
Apache-2.0
| 2019-04-10T11:04:22
| 2019-03-01T11:49:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,786
|
py
|
alignment.py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import torch
import torch.nn as nn
VALID_CORE_FUNC_TYPES = ['l1', 'l2', 'wing']
def wing_core(abs_x, w, eps):
"""Calculates the wing function from https://arxiv.org/pdf/1711.06753.pdf"""
return w*math.log(1. + abs_x / eps)
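# Worked example (illustrative): with w = 0.156 and eps = 0.031,
# wing_core(0.01, 0.156, 0.031) = 0.156 * ln(1 + 0.01 / 0.031) ~= 0.044,
# i.e. small residuals are penalised more steeply than under plain L1 (0.01).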
class AlignmentLoss(nn.Module):
"""Regression loss to train landmarks model"""
def __init__(self, loss_type='l2'):
super(AlignmentLoss, self).__init__()
assert loss_type in VALID_CORE_FUNC_TYPES
self.uniform_weights = True
self.weights = None
self.core_func_type = loss_type
self.eps = 0.031
self.w = 0.156
def set_weights(self, weights):
"""Set weights for the each landmark point in loss"""
self.uniform_weights = False
self.weights = torch.FloatTensor(weights).cuda()
def forward(self, input_values, target):
bs = input_values.shape[0]
loss = input_values - target
n_points = loss.shape[1] // 2
loss = loss.view(-1, n_points, 2)
if self.core_func_type == 'l2':
loss = torch.norm(loss, p=2, dim=2)
loss = loss.pow(2)
eyes_dist = (torch.norm(target[:, 0:2] - target[:, 2:4], p=2, dim=1).reshape(-1)).pow_(2)
elif self.core_func_type == 'l1':
loss = torch.norm(loss, p=1, dim=2)
eyes_dist = (torch.norm(target[:, 0:2] - target[:, 2:4], p=1, dim=1).reshape(-1))
elif self.core_func_type == 'wing':
wing_const = self.w - wing_core(self.w, self.w, self.eps)
loss = torch.abs(loss)
loss[loss < wing_const] = self.w*torch.log(1. + loss[loss < wing_const] / self.eps)
loss[loss >= wing_const] -= wing_const
loss = torch.sum(loss, 2)
eyes_dist = (torch.norm(target[:, 0:2] - target[:, 2:4], p=1, dim=1).reshape(-1))
if self.uniform_weights:
loss = torch.sum(loss, 1)
loss /= n_points
else:
assert self.weights.shape[0] == loss.shape[1]
loss = torch.mul(loss, self.weights)
loss = torch.sum(loss, 1)
loss = torch.div(loss, eyes_dist)
loss = torch.sum(loss)
return loss / (2.*bs)
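# A minimal usage sketch (not part of the original module); inputs are flattened
# landmark coordinates of shape (batch, 2 * n_points):
#
#     criterion = AlignmentLoss(loss_type='wing')
#     pred = torch.rand(8, 10)      # 8 samples, 5 landmark points each
#     target = torch.rand(8, 10)
#     loss = criterion(pred, target)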
|
502b02d1c61d4e86509630f79c837a98d579e3d2
|
39d3a01f30dd24be11f284a6fd25aae3c3ac56e5
|
/pixeldefend/models/pixelcnn_cifar.py
|
2bd1d74b9beec39dd65d303e28781ab2cafd0007
|
[] |
no_license
|
anishathalye/obfuscated-gradients
|
a94470ad4ca7a9f16f2856f7fbab558e2e4c891e
|
9ef8b37ae985183f1cd37a40d126af7ab6c8d057
|
refs/heads/master
| 2023-06-21T19:37:43.648545
| 2023-06-10T14:11:29
| 2023-06-10T14:11:29
| 119,627,984
| 917
| 187
| null | 2018-11-04T23:15:52
| 2018-01-31T03:13:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
pixelcnn_cifar.py
|
import models.pixel_cnn_pp.nn as nn
from models.pixel_cnn_pp.model import model_spec
from utils import optimistic_restore
import tensorflow as tf
import numpy as np
import os
_PIXELCNN_CHECKPOINT_NAME = 'params_cifar.ckpt'
DATA_DIR = os.path.join(
os.path.dirname(__file__),
os.pardir,
'data'
)
_obs_shape = (32, 32, 3)
_model_opt = {
'nr_resnet': 5,
'nr_filters': 160,
'nr_logistic_mix': 10,
'resnet_nonlinearity': 'concat_elu'
}
# XXX this being called "model" could cause problems if other things want to use the same scope
_model_func = tf.make_template('model', model_spec)
def _init_model(sess, checkpoint_name=None):
global _model_func
global _obs_shape
global _model_opt
if checkpoint_name is None:
checkpoint_name = _PIXELCNN_CHECKPOINT_NAME
checkpoint_path = os.path.join(DATA_DIR, checkpoint_name)
x_init = tf.placeholder(tf.float32, (1,) + _obs_shape)
model = _model_func(x_init, init=True, dropout_p=0.5, **_model_opt)
# XXX need to add a scope argument to optimistic_restore and filter for
# things that start with "{scope}/", so we can filter for "model/", because
# the pixelcnn checkpoint has some random unscoped stuff like 'Variable'
optimistic_restore(sess, checkpoint_path)
# input is [batch, 32, 32, 3], pixels in [-1, 1]
_initialized = False
_initialized_name = None
def model(sess, image, checkpoint_name=None):
global _initialized
global _initialized_name
global _model_func
global _model_opt
if checkpoint_name is not None:
checkpoint_name = os.path.basename(checkpoint_name)
# currently, we only support one version of this model loaded at a
# time; making multiple versions probably involves variable renaming or
# something else that's probably painful
assert not _initialized or _initialized_name == checkpoint_name
if not _initialized:
_init_model(sess, checkpoint_name)
_initialized = True
_initialized_name = checkpoint_name
out = _model_func(image, dropout_p=0, **_model_opt)
loss = nn.discretized_mix_logistic_loss(image, out)
return loss, out
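# A minimal usage sketch (not part of the original module); TF1-style graph
# code, with `batch` assumed to be a [N, 32, 32, 3] array scaled to [-1, 1]:
#
#     sess = tf.Session()
#     image = tf.placeholder(tf.float32, (None, 32, 32, 3))
#     loss, out = model(sess, image)          # restores checkpoint on first call
#     nll = sess.run(loss, {image: batch})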
|
94ba1ac5ca4e96bbfe8db89ae86087b430ab254f
|
7ae27ce9a8c477855f8fd5fac54685716d868349
|
/invokeai/app/invocations/noise.py
|
1f1d9fe3ce8d3d85b870dcbed792c14b724d599d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
invoke-ai/InvokeAI
|
5f7a2c1f19b1f686099a8cf4cec85aa9c7b6d81d
|
2bd3cf28eabff2dcf3339669be222061dd208cb8
|
refs/heads/main
| 2023-08-31T07:06:56.721576
| 2023-08-30T19:05:17
| 2023-08-30T19:05:17
| 525,592,995
| 15,987
| 1,678
|
Apache-2.0
| 2023-09-14T20:29:39
| 2022-08-17T01:04:27
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
noise.py
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team
import torch
from pydantic import validator
from invokeai.app.invocations.latent import LatentsField
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
OutputField,
invocation,
invocation_output,
)
"""
Utilities
"""
def get_noise(
width: int,
height: int,
device: torch.device,
seed: int = 0,
latent_channels: int = 4,
downsampling_factor: int = 8,
use_cpu: bool = True,
perlin: float = 0.0,
):
"""Generate noise for a given image size."""
noise_device_type = "cpu" if use_cpu else device.type
# limit noise to only the diffusion image channels, not the mask channels
input_channels = min(latent_channels, 4)
generator = torch.Generator(device=noise_device_type).manual_seed(seed)
noise_tensor = torch.randn(
[
1,
input_channels,
height // downsampling_factor,
width // downsampling_factor,
],
dtype=torch_dtype(device),
device=noise_device_type,
generator=generator,
).to("cpu")
return noise_tensor
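# A minimal usage sketch (not part of the original module):
#
#     device = choose_torch_device()
#     noise = get_noise(width=512, height=512, device=device, seed=42)
#     print(noise.shape)    # torch.Size([1, 4, 64, 64])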
"""
Nodes
"""
@invocation_output("noise_output")
class NoiseOutput(BaseInvocationOutput):
"""Invocation noise output"""
noise: LatentsField = OutputField(default=None, description=FieldDescriptions.noise)
width: int = OutputField(description=FieldDescriptions.width)
height: int = OutputField(description=FieldDescriptions.height)
def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int):
return NoiseOutput(
noise=LatentsField(latents_name=latents_name, seed=seed),
width=latents.size()[3] * 8,
height=latents.size()[2] * 8,
)
@invocation("noise", title="Noise", tags=["latents", "noise"], category="latents")
class NoiseInvocation(BaseInvocation):
"""Generates latent noise."""
seed: int = InputField(
ge=0,
le=SEED_MAX,
description=FieldDescriptions.seed,
default_factory=get_random_seed,
)
width: int = InputField(
default=512,
multiple_of=8,
gt=0,
description=FieldDescriptions.width,
)
height: int = InputField(
default=512,
multiple_of=8,
gt=0,
description=FieldDescriptions.height,
)
use_cpu: bool = InputField(
default=True,
description="Use CPU for noise generation (for reproducible results across platforms)",
)
@validator("seed", pre=True)
def modulo_seed(cls, v):
"""Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
return v % (SEED_MAX + 1)
def invoke(self, context: InvocationContext) -> NoiseOutput:
noise = get_noise(
width=self.width,
height=self.height,
device=choose_torch_device(),
seed=self.seed,
use_cpu=self.use_cpu,
)
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.save(name, noise)
return build_noise_output(latents_name=name, latents=noise, seed=self.seed)
|
ccfdd7514aa9113dc237e829c156d16759018eca
|
60ba5cc2f817471dd0ff84a15996b46b1dbfa6ba
|
/park/unittest/test_multi_dim_index.py
|
044efa666beac88fd27e218f4dafb1248c2e604c
|
[
"MIT"
] |
permissive
|
park-project/park
|
dd15d27e5859fe421c878a90627716623892b6f9
|
08f8f7f0dea14e011af2d5ce2a72410084eb8713
|
refs/heads/master
| 2023-06-28T11:38:30.359938
| 2022-04-07T14:42:21
| 2022-04-07T14:42:21
| 184,142,889
| 216
| 55
|
MIT
| 2023-06-14T16:10:38
| 2019-04-29T20:55:25
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
test_multi_dim_index.py
|
import sys
sys.path.append('/home/ubuntu/park')
import unittest
from park.unittest.run_env import run_env_with_random_agent
class TestMultiDimIndex(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.env_name = 'multi_dim_index'
def test_run_env_n_times(self, n=10):
        for _ in range(n):
            # note: the same seed (n) is reused for every run
            run_env_with_random_agent(self.env_name, seed=n)
TestMultiDimIndex().test_run_env_n_times(1)
|
3ff8d14a1652271308d12774bdbc2afbf41b2635
|
6436d1e6c23f9f43a8025889dc4414a3ad66acf2
|
/Assets/Python/BUG/Tabs/BugCityScreenOptionsTab.py
|
5e78f26838546355a735b97c4c554c2ab9108d53
|
[
"MIT"
] |
permissive
|
dguenms/Dawn-of-Civilization
|
b710195c4f46fe11d9229182c3b1e07b77f42637
|
a305e7846d085d6edf1e9c472e8dfceee1c07dd4
|
refs/heads/develop
| 2023-09-04T04:57:00.086384
| 2023-09-01T15:24:28
| 2023-09-01T15:24:28
| 45,362,597
| 116
| 121
|
MIT
| 2023-02-08T00:18:53
| 2015-11-01T23:52:28
|
C++
|
UTF-8
|
Python
| false
| false
| 6,908
|
py
|
BugCityScreenOptionsTab.py
|
## BugCityScreenOptionsTab
##
## Tab for the BUG City Screen Options.
##
## Copyright (c) 2007-2008 The BUG Mod.
##
## Author: EmperorFool
import BugOptionsTab
class BugCityScreenOptionsTab(BugOptionsTab.BugOptionsTab):
"BUG City Screen Options Screen Tab"
def __init__(self, screen):
BugOptionsTab.BugOptionsTab.__init__(self, "CityScreen", "City Screen")
def create(self, screen):
tab = self.createTab(screen)
panel = self.createMainPanel(screen)
column = self.addOneColumnLayout(screen, panel)
left, right = self.addTwoColumnLayout(screen, column, "Page", True)
self.createRawYieldsPanel(screen, left)
self.addSpacer(screen, left, "CityScreen1")
self.createHurryDetailPanel(screen, left)
self.addSpacer(screen, left, "CityScreen2")
self.createBuildingActualEffectsPanel(screen, left)
self.addSpacer(screen, left, "CityScreen3")
self.createGreatPersonBarPanel(screen, left)
self.addSpacer(screen, left, "CityScreen4")
self.createProductionQueuePanel(screen, left)
self.createCityBarPanel(screen, right)
self.addSpacer(screen, right, "CityScreen6")
self.createMiscellaneousPanel(screen, right)
def createRawYieldsPanel(self, screen, panel):
#self.addCheckboxTextDropdown(screen, left, left, "CityScreen__RawYields", "CityScreen__RawYields_View")
self.addCheckbox(screen, panel, "CityScreen__RawYields")
self.addTextDropdown(screen, panel, panel, "CityScreen__RawYieldsView", True)
self.addTextDropdown(screen, panel, panel, "CityScreen__RawYieldsTiles", True)
def createHurryDetailPanel(self, screen, panel):
self.addLabel(screen, panel, "HurryDetail", "Hurry Detail:")
left, right = self.addTwoColumnLayout(screen, panel, "HurryDetail", False)
self.addCheckbox(screen, left, "CityBar__HurryAssist")
self.addCheckbox(screen, right, "CityBar__HurryAssistIncludeCurrent")
self.addCheckbox(screen, left, "CityScreen__WhipAssist")
self.addCheckbox(screen, right, "CityScreen__WhipAssistOverflowCountCurrentProduction")
self.addCheckbox(screen, left, "MiscHover__HurryOverflow")
self.addCheckbox(screen, right, "MiscHover__HurryOverflowIncludeCurrent")
def createBuildingActualEffectsPanel(self, screen, panel):
self.addLabel(screen, panel, "BuildingEffects", "Building Actual Effects in Hovers:")
left, right = self.addTwoColumnLayout(screen, panel, "BuildingEffects", False)
self.addCheckbox(screen, left, "MiscHover__BuildingActualEffects")
self.addCheckbox(screen, left, "MiscHover__BuildingAdditionalFood")
self.addCheckbox(screen, left, "MiscHover__BuildingAdditionalProduction")
self.addCheckbox(screen, left, "MiscHover__BuildingAdditionalCommerce")
self.addCheckbox(screen, left, "MiscHover__BuildingSavedMaintenance")
self.addSpacer(screen, right, "CityScreen2a")
self.addCheckbox(screen, right, "MiscHover__BuildingAdditionalHealth")
self.addCheckbox(screen, right, "MiscHover__BuildingAdditionalHappiness")
self.addCheckbox(screen, right, "MiscHover__BuildingAdditionalGreatPeople")
self.addCheckbox(screen, right, "MiscHover__BuildingAdditionalDefense")
def createGreatPersonBarPanel(self, screen, panel):
self.addLabel(screen, panel, "GreatPersonBar", "Great Person Bar:")
self.addCheckbox(screen, panel, "CityScreen__GreatPersonTurns")
self.addCheckbox(screen, panel, "CityScreen__GreatPersonInfo")
self.addCheckbox(screen, panel, "MiscHover__GreatPeopleRateBreakdown")
def createProductionQueuePanel(self, screen, panel):
self.addLabel(screen, panel, "ProductionQueue", "Production Queue:")
self.addCheckbox(screen, panel, "CityScreen__ProductionStarted")
left, center, right = self.addThreeColumnLayout(screen, panel, "ProductionDecay")
self.addLabel(screen, left, "ProductionDecay", "Decay:")
self.addCheckbox(screen, center, "CityScreen__ProductionDecayQueue")
self.addCheckbox(screen, right, "CityScreen__ProductionDecayHover")
self.addIntDropdown(screen, left, center, "CityScreen__ProductionDecayQueueUnitThreshold", True)
self.addIntDropdown(screen, left, center, "CityScreen__ProductionDecayQueueBuildingThreshold", True)
self.addIntDropdown(screen, None, right, "CityScreen__ProductionDecayHoverUnitThreshold")
self.addIntDropdown(screen, None, right, "CityScreen__ProductionDecayHoverBuildingThreshold")
def createCityBarPanel(self, screen, panel):
self.addLabel(screen, panel, "CitybarHover", "City Bar Hover:")
left, right = self.addTwoColumnLayout(screen, panel, "CityBarHover", False)
self.addCheckbox(screen, left, "CityBar__BaseValues")
self.addCheckbox(screen, left, "CityBar__Health")
self.addCheckbox(screen, left, "CityBar__Happiness")
self.addCheckbox(screen, left, "CityBar__FoodAssist")
self.addCheckbox(screen, left, "CityBar__BaseProduction")
self.addCheckbox(screen, left, "CityBar__TradeDetail")
self.addCheckbox(screen, left, "CityBar__Commerce")
self.addCheckbox(screen, left, "CityBar__CultureTurns")
self.addCheckbox(screen, left, "CityBar__GreatPersonTurns")
self.addLabel(screen, right, "Cityanger", "City Anger:")
self.addCheckbox(screen, right, "CityBar__HurryAnger")
self.addCheckbox(screen, right, "CityBar__DraftAnger")
self.addSpacer(screen, right, "CityScreen5")
self.addCheckbox(screen, right, "CityBar__BuildingActualEffects")
self.addCheckbox(screen, right, "CityBar__BuildingIcons")
self.addCheckbox(screen, right, "CityBar__Specialists")
self.addCheckbox(screen, right, "CityBar__RevoltChance")
self.addCheckbox(screen, right, "CityBar__HideInstructions")
# EF: Airport Icons option is on Map tab
#self.addCheckbox(screen, right, "CityBar__AirportIcons")
def createMiscellaneousPanel(self, screen, panel):
self.addLabel(screen, panel, "Misc", "Miscellaneous:")
self.addCheckbox(screen, panel, "MiscHover__BaseCommerce")
self.addCheckbox(screen, panel, "CityScreen__FoodAssist")
self.addCheckbox(screen, panel, "CityScreen__Anger_Counter")
self.addCheckbox(screen, panel, "CityScreen__CultureTurns")
self.addCheckbox(screen, panel, "MainInterface__ProgressBarsTickMarks")
self.addCheckbox(screen, panel, "CityScreen__OnlyPresentReligions")
self.addCheckbox(screen, panel, "CityScreen__OnlyPresentCorporations")
self.addTextDropdown(screen, panel, panel, "CityScreen__Specialists", True)
#self.addCheckbox(screen, panel, "MiscHover__RemoveSpecialist")
self.addCheckbox(screen, panel, "MiscHover__UnitExperience")
self.addCheckbox(screen, panel, "MiscHover__UnitExperienceModifiers")
self.addCheckbox(screen, panel, "MiscHover__ConscriptUnit")
self.addCheckbox(screen, panel, "MiscHover__ConscriptLimit")
self.addCheckbox(screen, panel, "CityScreen__ProductionPopupTrainCivilianUnitsForever")
self.addCheckbox(screen, panel, "CityScreen__ProductionPopupTrainMilitaryUnitsForever")
|
e90049026804e7436f9e7b962052cf3072cd9cda
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/apache/sqoop/operators/sqoop.py
|
8aaae63dabda4458722ed4d66c766f17c66a2b0e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 11,027
|
py
|
sqoop.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a sqoop 1 operator."""
from __future__ import annotations
import os
import signal
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SqoopOperator(BaseOperator):
"""
Execute a Sqoop job.
Documentation for Apache Sqoop can be found here: https://sqoop.apache.org/docs/1.4.2/SqoopUserGuide.html
:param conn_id: str
:param cmd_type: str specify command to execute "export" or "import"
:param schema: Schema name
:param table: Table to read
:param query: Import result of arbitrary SQL query. Instead of using the table,
columns and where arguments, you can specify a SQL statement with the query
argument. Must also specify a destination directory with target_dir.
:param target_dir: HDFS destination directory where the data
from the rdbms will be written
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" Imports data
    into the specified format. Defaults to text.
:param columns: <col,col,col> Columns to import from table
:param num_mappers: Use n mapper tasks to import/export in parallel
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param export_dir: HDFS Hive database directory to export to the rdbms
:param input_null_string: The string to be interpreted as null
for string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the input field separator
:param input_lines_terminated_by: Sets the input end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param direct: Use direct export fast path
:param driver: Manually specify JDBC driver class to use
:param verbose: Switch to more verbose logging for debug purposes
:param relaxed_isolation: use read uncommitted isolation level
:param hcatalog_database: Specifies the database name for the HCatalog table
:param hcatalog_table: The argument value for this option is the HCatalog table
:param create_hcatalog_table: Have sqoop create the hcatalog table passed
in or not
:param properties: additional JVM properties passed to sqoop
:param extra_options: Extra import/export options to pass as dict to the SqoopHook.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
:param libjars: Optional Comma separated jar files to include in the classpath.
"""
template_fields: Sequence[str] = (
"conn_id",
"cmd_type",
"table",
"query",
"target_dir",
"file_type",
"columns",
"split_by",
"where",
"export_dir",
"input_null_string",
"input_null_non_string",
"staging_table",
"enclosed_by",
"escaped_by",
"input_fields_terminated_by",
"input_lines_terminated_by",
"input_optionally_enclosed_by",
"properties",
"extra_options",
"driver",
"hcatalog_database",
"hcatalog_table",
"schema",
)
template_fields_renderers = {"query": "sql"}
ui_color = "#7D8CA4"
def __init__(
self,
*,
conn_id: str = "sqoop_default",
cmd_type: str = "import",
table: str | None = None,
query: str | None = None,
target_dir: str | None = None,
append: bool = False,
file_type: str = "text",
columns: str | None = None,
num_mappers: int | None = None,
split_by: str | None = None,
where: str | None = None,
export_dir: str | None = None,
input_null_string: str | None = None,
input_null_non_string: str | None = None,
staging_table: str | None = None,
clear_staging_table: bool = False,
enclosed_by: str | None = None,
escaped_by: str | None = None,
input_fields_terminated_by: str | None = None,
input_lines_terminated_by: str | None = None,
input_optionally_enclosed_by: str | None = None,
batch: bool = False,
direct: bool = False,
driver: Any | None = None,
verbose: bool = False,
relaxed_isolation: bool = False,
properties: dict[str, Any] | None = None,
hcatalog_database: str | None = None,
hcatalog_table: str | None = None,
create_hcatalog_table: bool = False,
extra_options: dict[str, Any] | None = None,
schema: str | None = None,
libjars: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.cmd_type = cmd_type
self.table = table
self.query = query
self.target_dir = target_dir
self.append = append
self.file_type = file_type
self.columns = columns
self.num_mappers = num_mappers
self.split_by = split_by
self.where = where
self.export_dir = export_dir
self.input_null_string = input_null_string
self.input_null_non_string = input_null_non_string
self.staging_table = staging_table
self.clear_staging_table = clear_staging_table
self.enclosed_by = enclosed_by
self.escaped_by = escaped_by
self.input_fields_terminated_by = input_fields_terminated_by
self.input_lines_terminated_by = input_lines_terminated_by
self.input_optionally_enclosed_by = input_optionally_enclosed_by
self.batch = batch
self.direct = direct
self.driver = driver
self.verbose = verbose
self.relaxed_isolation = relaxed_isolation
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.create_hcatalog_table = create_hcatalog_table
self.properties = properties
self.extra_options = extra_options or {}
self.hook: SqoopHook | None = None
self.schema = schema
self.libjars = libjars
def execute(self, context: Context) -> None:
"""Execute sqoop job."""
if self.hook is None:
self.hook = self._get_hook()
if self.cmd_type == "export":
self.hook.export_table(
table=self.table, # type: ignore
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
schema=self.schema,
)
elif self.cmd_type == "import":
if self.table and self.query:
raise AirflowException("Cannot specify query and table together. Need to specify either or.")
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
schema=self.schema,
)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
)
else:
raise AirflowException("Provide query or table parameter to import using Sqoop")
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
def on_kill(self) -> None:
if self.hook is None:
self.hook = self._get_hook()
self.log.info("Sending SIGTERM signal to bash process group")
os.killpg(os.getpgid(self.hook.sub_process_pid), signal.SIGTERM)
def _get_hook(self) -> SqoopHook:
"""Return a SqoopHook instance."""
# Add `create-hcatalog-table` to extra options if option passed to operator in case of `import`
# command. Similarly, if new parameters are added to the operator, you can pass them to
# `extra_options` so that you don't need to modify `SqoopHook` for each new parameter.
if self.cmd_type == "import" and self.create_hcatalog_table:
self.extra_options["create-hcatalog-table"] = ""
return SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties,
libjars=self.libjars,
extra_options=self.extra_options,
)
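# --- Usage sketch (editor's addition, not part of the provider source) ---
# A minimal separate DAG file wiring the operator for a table import; the
# connection id, table name, target directory and split column are assumptions.
import pendulum
from airflow import DAG
from airflow.providers.apache.sqoop.operators.sqoop import SqoopOperator
with DAG(
    dag_id="sqoop_import_example",
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
) as dag:
    SqoopOperator(
        task_id="import_orders",
        conn_id="sqoop_default",        # default Sqoop connection
        cmd_type="import",
        table="orders",                 # source RDBMS table
        target_dir="/data/raw/orders",  # HDFS destination directory
        file_type="avro",
        num_mappers=4,
        split_by="order_id",            # column used to split work units
    )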
|
2f90a5bdb40b8fd4f3230a6328d9a6aaeb7dbccb
|
d50ec43131be668368200315d1d9d307071d5385
|
/keanu-python/nd4j/nd4j/__version__.py
|
190d5b391a8f259545044fe6289da6c8f5469ecb
|
[
"MIT"
] |
permissive
|
improbable-research/keanu
|
605e4dc6a2f90f095c2c1ec91fa1222ae8d04530
|
99de10a15e0d4b33d323093a5cc2dd10b31c9954
|
refs/heads/develop
| 2023-04-14T01:17:29.130975
| 2021-09-21T10:24:48
| 2021-09-21T10:24:48
| 128,393,918
| 155
| 47
|
MIT
| 2023-04-12T00:18:07
| 2018-04-06T12:48:36
|
Java
|
UTF-8
|
Python
| false
| false
| 28
|
py
|
__version__.py
|
__version__ = '1.0.0-beta3'
|
e08fc5ccd9cb56f4548c7ba2abaebd54451c1bea
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/计算几何/六边形蜂巢距离.py
|
95359bc7f5b4982e148d5bfbc84d8fcdda15db12
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
六边形蜂巢距离.py
|
# Hexagonal distance formula (honeycomb distance formula)
def hexagon_distance(x1: int, y1: int, x2: int, y2: int) -> int:
"""蜂巢六边形中两点(x1, y1)和(x2, y2)的距离(移动步数)"""
dx, dy = x1 - x2, y1 - y2
return max(abs(dx), abs(dy), abs(dx - dy))  # note: this is also the Chebyshev distance to the origin in 3D space
assert hexagon_distance(0, 0, 1, 1) == 1
assert hexagon_distance(1, 1, 2, 0) == 2
# honeycomb
DIR6 = ((0, 1), (1, 0), (1, -1), (0, -1), (-1, 0), (-1, 1))
# https://github.com/pranjalssh/CP_codes/blob/master/anta/!HexMap.cpp
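# Editor's note (addition): with the |dx - dy| term, hexagon_distance treats
# (1, 1) and (-1, -1) as unit neighbors; DIR6 above lists (1, -1) and (-1, 1)
# instead, i.e. the mirrored axial convention whose matching distance is
# max(|dx|, |dy|, |dx + dy|). A neighbor generator consistent with the formula:
def hexagon_neighbors(x: int, y: int):
    for dx, dy in ((0, 1), (1, 0), (1, 1), (0, -1), (-1, 0), (-1, -1)):
        yield x + dx, y + dy
assert all(hexagon_distance(0, 0, nx, ny) == 1 for nx, ny in hexagon_neighbors(0, 0))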
|
dcbdaba607a880fdb4dd7b6fe79b15fa4a824aa2
|
a689769a760105bfae0feba6bbab0111eddedd5b
|
/tests/inputs/config.py
|
6da1f887d52ae6225416fb696bbacf2841ee3c67
|
[
"MIT"
] |
permissive
|
danielgtaylor/python-betterproto
|
b7ee67f19700de3c31047568d2edf2d52d333d7d
|
4cdf1bb9e0651b0604c1cc21172e13053da7653c
|
refs/heads/master
| 2023-08-24T14:47:15.068858
| 2023-07-29T11:06:56
| 2023-07-29T11:06:56
| 214,084,028
| 1,207
| 188
|
MIT
| 2023-09-02T10:12:28
| 2019-10-10T04:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 996
|
py
|
config.py
|
# Test cases that are expected to fail, e.g. unimplemented features or bug-fixes.
# Remove from list when fixed.
xfail = {
"namespace_keywords", # 70
"googletypes_struct", # 9
"googletypes_value", # 9
"import_capitalized_package",
"example", # This is the example in the readme. Not a test.
}
services = {
"googletypes_request",
"googletypes_response",
"googletypes_response_embedded",
"service",
"service_separate_packages",
"import_service_input_message",
"googletypes_service_returns_empty",
"googletypes_service_returns_googletype",
"example_service",
"empty_service",
"service_uppercase",
}
# Indicate json sample messages to skip when testing that json (de)serialization
# is symmetrical because some cases legitimately are not symmetrical.
# Each key references the name of the test scenario and the values in the tuple
# are the names of the json files.
non_symmetrical_json = {"empty_repeated": ("empty_repeated",)}
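# Illustration (editor's addition, not part of the repo): one way a pytest suite
# could consume these sets -- marking cases listed in `xfail` accordingly. The
# `all_test_cases` name is hypothetical.
#   import pytest
#   params = [
#       pytest.param(name, marks=pytest.mark.xfail) if name in xfail else name
#       for name in all_test_cases
#   ]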
|
869d14dde146ab10443dd1a78212033034c84109
|
476f16f7af921e53287efad025ae3ce32f38a4fa
|
/external/bloodhound/enumeration/outputworker.py
|
34ef92ae93e6e2e5517222cf7d3604a7a55995fa
|
[
"MIT"
] |
permissive
|
CasperGN/ActiveDirectoryEnumeration
|
dcf760ac7193d4de99449f77a633ef75404b3d0a
|
49641f7edd451fb07ebdada8b60b0451a1a38c16
|
refs/heads/master
| 2023-08-21T15:05:54.151817
| 2023-02-17T12:31:37
| 2023-02-17T12:31:37
| 245,241,283
| 133
| 40
|
MIT
| 2023-08-03T00:17:19
| 2020-03-05T18:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,941
|
py
|
outputworker.py
|
####################
#
# Copyright (c) 2018 Fox-IT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####################
import logging
import traceback
import codecs
import json
class OutputWorker(object):
@staticmethod
def write_worker(result_q, computers_filename):
"""
Worker to write the results from the results_q to the given files.
"""
computers_out = codecs.open(computers_filename, 'w', 'utf-8')
# If the logging level is DEBUG, we indent the objects
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
indent_level = 1
else:
indent_level = None
# Write start of the json file
computers_out.write('{"computers":[')
num_computers = 0
while True:
obj = result_q.get()
if obj is None:
logging.debug('Write worker obtained a None value, exiting')
break
objtype, data = obj
if objtype == 'computer':
if num_computers != 0:
computers_out.write(',')
json.dump(data, computers_out, indent=indent_level)
num_computers += 1
else:
logging.warning("Type is %s this should not happen", objtype)
result_q.task_done()
logging.debug('Write worker is done, closing files')
# Write metadata manually
computers_out.write('],"meta":{"type":"computers","count":%d, "version":3}}' % num_computers)
computers_out.close()
result_q.task_done()
@staticmethod
def membership_write_worker(result_q, enumtype, filename):
"""
Worker to write the results from the results_q to the given file.
This is for both users and groups
"""
try:
membership_out = codecs.open(filename, 'w', 'utf-8')
except:
logging.warning('Could not write file: %s', filename)
result_q.task_done()
return
# If the logging level is DEBUG, we indent the objects
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
indent_level = 1
else:
indent_level = None
# Write start of the json file
membership_out.write('{"%s":[' % enumtype)
num_members = 0
while True:
data = result_q.get()
if data is None:
break
if num_members != 0:
membership_out.write(',')
json.dump(data, membership_out, indent=indent_level)
num_members += 1
result_q.task_done()
logging.info('Found %d %s', num_members, enumtype)
# Write metadata manually
membership_out.write('],"meta":{"type":"%s","count":%d, "version":3}}' % (enumtype, num_members))
membership_out.close()
result_q.task_done()
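# Usage sketch (editor's addition, not part of BloodHound): feeding write_worker
# from a standard queue; the file name and sample record are assumptions. The
# None sentinel makes the worker write the metadata footer and stop.
if __name__ == '__main__':
    import queue
    result_q = queue.Queue()
    result_q.put(('computer', {'Name': 'PC01.EXAMPLE.LOCAL'}))
    result_q.put(None)
    OutputWorker.write_worker(result_q, 'computers.json')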
|
997e91c1cf3d507747abd7556c59c7b269adc437
|
119fd34e3481b78b90f76ac1ea2829aa3ba670b0
|
/ktrain/text/ner/anago/models.py
|
7a66f9e4736d0653302d26c0505c413675d0f3e1
|
[
"Apache-2.0",
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
amaiya/ktrain
|
7b7862faa37909c038640177ae504b4869738e48
|
ab03ae68053b727cb8907e08c35f265531d1cb3a
|
refs/heads/master
| 2023-08-28T06:42:41.111198
| 2023-08-17T10:51:40
| 2023-08-17T10:51:40
| 169,442,310
| 1,217
| 309
|
Apache-2.0
| 2023-03-30T14:33:43
| 2019-02-06T17:01:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,478
|
py
|
models.py
|
"""
Model definition.
"""
from .... import utils as U
from ....imports import *
# if U.is_tf_keras():
# from .layers import CRF
# else:
# from .layers_standalone import CRF
def save_model(model, weights_file, params_file):
with open(params_file, "w") as f:
params = model.to_json()
json.dump(json.loads(params), f, sort_keys=True, indent=4)
model.save_weights(weights_file)
def load_model(weights_file, params_file):
with open(params_file) as f:
model = keras.models.model_from_json(f.read(), custom_objects={"CRF": CRF})
model.load_weights(weights_file)
return model
class BiLSTMCRF(object):
"""A Keras implementation of BiLSTM-CRF for sequence labeling.
References
----------
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
def __init__(
self,
num_labels,
word_vocab_size,
char_vocab_size=None,
word_embedding_dim=100,
char_embedding_dim=25,
word_lstm_size=100,
char_lstm_size=25,
fc_dim=100,
dropout=0.5,
embeddings=None,
use_char=True,
use_crf=True,
char_mask_zero=True,
use_elmo=False,
use_transformer_with_dim=None,
):
"""Build a Bi-LSTM CRF model.
Args:
word_vocab_size (int): word vocabulary size.
char_vocab_size (int): character vocabulary size.
num_labels (int): number of entity labels.
word_embedding_dim (int): word embedding dimensions.
char_embedding_dim (int): character embedding dimensions.
word_lstm_size (int): word tagger LSTM output dimensions.
char_lstm_size (int): character LSTM feature extractor output dimensions.
fc_dim (int): output fully-connected layer size.
dropout (float): dropout rate.
embeddings (numpy array): word embedding matrix.
use_char (boolean): add char feature.
use_crf (boolean): use crf as last layer.
char_mask_zero(boolean): mask zero for character embedding (see TF2 issues #33148 and #33069)
use_elmo(boolean): If True, model will be configured to accept Elmo embeddings
as an additional input to word and character embeddings
use_transformer_with_dim(int): If not None, model will be configured to accept
transformer embeddings of given dimension
"""
super(BiLSTMCRF, self).__init__()
self._char_embedding_dim = char_embedding_dim
self._word_embedding_dim = word_embedding_dim
self._char_lstm_size = char_lstm_size
self._word_lstm_size = word_lstm_size
self._char_vocab_size = char_vocab_size
self._word_vocab_size = word_vocab_size
self._fc_dim = fc_dim
self._dropout = dropout
self._use_char = use_char
self._use_crf = use_crf
self._embeddings = embeddings
self._num_labels = num_labels
self._char_mask_zero = char_mask_zero
self._use_elmo = use_elmo
self._use_transformer_with_dim = use_transformer_with_dim
def build(self):
# build word embedding
word_ids = keras.layers.Input(
batch_shape=(None, None), dtype="int32", name="word_input"
)
inputs = [word_ids]
embedding_list = []
if self._embeddings is None:
word_embeddings = keras.layers.Embedding(
input_dim=self._word_vocab_size,
output_dim=self._word_embedding_dim,
mask_zero=True,
name="word_embedding",
)(word_ids)
else:
word_embeddings = keras.layers.Embedding(
input_dim=self._embeddings.shape[0],
output_dim=self._embeddings.shape[1],
mask_zero=True,
weights=[self._embeddings],
name="word_embedding",
)(word_ids)
embedding_list.append(word_embeddings)
# build character based word embedding
if self._use_char:
char_ids = keras.layers.Input(
batch_shape=(None, None, None), dtype="int32", name="char_input"
)
inputs.append(char_ids)
char_embeddings = keras.layers.Embedding(
input_dim=self._char_vocab_size,
output_dim=self._char_embedding_dim,
mask_zero=self._char_mask_zero,
name="char_embedding",
)(char_ids)
char_embeddings = keras.layers.TimeDistributed(
keras.layers.Bidirectional(keras.layers.LSTM(self._char_lstm_size))
)(char_embeddings)
embedding_list.append(char_embeddings)
# add elmo embedding
if self._use_elmo:
elmo_embeddings = keras.layers.Input(shape=(None, 1024), dtype="float32")
inputs.append(elmo_embeddings)
embedding_list.append(elmo_embeddings)
# add transformer embedding
if self._use_transformer_with_dim is not None:
transformer_embeddings = keras.layers.Input(
shape=(None, self._use_transformer_with_dim), dtype="float32"
)
inputs.append(transformer_embeddings)
embedding_list.append(transformer_embeddings)
# concatenate embeddings
word_embeddings = (
keras.layers.Concatenate()(embedding_list)
if len(embedding_list) > 1
else embedding_list[0]
)
# build model
word_embeddings = keras.layers.Dropout(self._dropout)(word_embeddings)
z = keras.layers.Bidirectional(
keras.layers.LSTM(units=self._word_lstm_size, return_sequences=True)
)(word_embeddings)
z = keras.layers.Dense(self._fc_dim, activation="tanh")(z)
if self._use_crf:
from .layers import CRF
crf = CRF(self._num_labels, sparse_target=False)
loss = crf.loss_function
pred = crf(z)
else:
loss = "categorical_crossentropy"
pred = keras.layers.Dense(self._num_labels, activation="softmax")(z)
model = keras.Model(inputs=inputs, outputs=pred)
return model, loss
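# Usage sketch (editor's addition): building the model with toy sizes; the
# numbers are assumptions, and use_crf=False keeps the example free of the CRF
# layer dependency.
if __name__ == "__main__":
    builder = BiLSTMCRF(num_labels=5, word_vocab_size=10000, char_vocab_size=100, use_crf=False)
    model, loss = builder.build()
    model.compile(optimizer="adam", loss=loss)
    model.summary()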
|
b8489f2deba2091748bf104259489729b44f2aa7
|
3d8951322db8a6251ec9d73cab45b56d8d4f06de
|
/ops.py
|
9b574663f6138cd2eda98854980afcd56800471a
|
[
"Apache-2.0"
] |
permissive
|
linjieyangsc/video_seg
|
f67e47e5ec9cfdea0e01805817e71c4a01165780
|
b956142691660f02bd72fad936879fc156ee5b47
|
refs/heads/master
| 2021-05-04T15:40:11.729445
| 2020-05-12T18:44:58
| 2020-05-12T18:44:58
| 120,236,287
| 172
| 28
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
ops.py
|
import tensorflow as tf
import os
slim = tf.contrib.slim
def instance_normalization(inputs,reuse=None, variables_collections=None,output_collections=None,
use_biases=True, trainable=True, scope=None):
with tf.variable_scope(scope, 'InstanceNorm', [inputs],
reuse=reuse) as sc:
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
if inputs_rank != 4:
raise ValueError('Inputs %s is not a 4D tensor.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = [1, 2]
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
#var_collections = slim.utils.get_variable_collections(
# variables_collections, name)
dtype = inputs.dtype.base_dtype
shape = tf.TensorShape([1, 1, 1]).concatenate(params_shape)
beta = slim.model_variable('beta', shape=shape, dtype=dtype,
initializer=tf.zeros_initializer(), collections=None,
trainable=use_biases)
gamma = slim.model_variable('gamma', shape=shape, dtype=dtype,
initializer=tf.ones_initializer(), collections=None,
trainable=trainable)
if use_biases:
print('using biases')
else:
print('not using biases')
outputs = inputs * gamma + beta
return slim.utils.collect_named_outputs(output_collections,
sc.original_name_scope,
outputs)
def conditional_normalization(inputs, gamma, reuse=None, variable_collections=None,
output_collections=None, trainable=True, scope=None):
with tf.variable_scope(scope, 'ConditionalNorm', [inputs, gamma],
reuse=reuse) as sc:
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
if inputs_rank != 4:
raise ValueError('Inputs %s is not a 4D tensor.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = [1, 2]
params_shape = inputs_shape[-1:]
gamma = tf.expand_dims(tf.expand_dims(gamma, 1), 1)
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
return inputs * gamma
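# Usage sketch (editor's addition, TF1-style): applying both ops to a feature
# map. Shapes and scope names are assumptions.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [None, 32, 32, 64])
    g = tf.placeholder(tf.float32, [None, 64])
    y1 = instance_normalization(x, scope='in1')
    y2 = conditional_normalization(x, g, scope='cn1')  # gamma broadcast over H, W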
|
abd2936b419aa87361bc847005c214abb27ce588
|
00f950030a550cfd7e972794ee993d94a92d75be
|
/ck/ck/repo/module/package/template-git-soft-customize.py
|
d2c27dd07a28ff3326907bb39b4011d1f88de019
|
[
"Apache-2.0",
"CC-BY-SA-3.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mlcommons/ck
|
6261de6a99934cdd84c413de01d2ee4df6f9217a
|
e4306117546ea01f688afc7540c0ae2e1c007470
|
refs/heads/master
| 2023-09-01T23:26:28.195951
| 2023-09-01T17:26:00
| 2023-09-01T17:26:00
| 26,230,485
| 122
| 33
|
Apache-2.0
| 2023-09-13T12:01:35
| 2014-11-05T17:14:43
|
Python
|
UTF-8
|
Python
| false
| false
| 653
|
py
|
template-git-soft-customize.py
|
#
# Automatically generated
#
import os
##############################################################################
# setup environment
def setup(i):
s=''
cus=i['customize']
env=i['env']
fp=cus.get('full_path','')
ep=cus.get('env_prefix','')
if ep=='':
return {'return':1, 'error':'environment prefix is not defined'}
if fp=='':
return {'return':1, 'error':'full path is not defined'}
# p1=os.path.dirname(fp)
env[ep]=os.path.join(fp, 'src')
# You can extend environment variables for your soft here:
# env[ep+'_EXTENSION']=xyz
return {'return':0, 'bat':s}
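# Usage sketch (editor's addition): how CK would invoke setup(); the keys match
# the fields the function reads, and the paths are assumptions.
if __name__ == '__main__':
    r = setup({'customize': {'full_path': '/opt/soft/pkg', 'env_prefix': 'CK_ENV_DEMO'}, 'env': {}})
    assert r == {'return': 0, 'bat': ''}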
|
f4fb59423eeb2e8d128543b03b0d629a6842d81d
|
2066e9e86a81291ef25029a37719a10d519c6545
|
/tasks/glue/data.py
|
d96f6962d97cee1d1b4b7948b906b01af0724cfb
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
NVIDIA/Megatron-LM
|
143cd64258191390a369442b705d5b348083c685
|
99b044bff07f8e5d48b45223ed4bb11bd4e884e6
|
refs/heads/main
| 2023-09-04T05:57:16.169810
| 2023-08-30T18:25:58
| 2023-08-30T18:25:58
| 176,982,014
| 6,315
| 1,136
|
NOASSERTION
| 2023-09-14T11:34:29
| 2019-03-21T16:15:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
data.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""GLUE dataset."""
from abc import ABC
from abc import abstractmethod
from torch.utils.data import Dataset
from megatron import print_rank_0
from tasks.data_utils import build_sample
from tasks.data_utils import build_tokens_types_paddings_from_text
class GLUEAbstractDataset(ABC, Dataset):
"""GLUE base dataset class."""
def __init__(self, task_name, dataset_name, datapaths,
tokenizer, max_seq_length):
# Store inputs.
self.task_name = task_name
self.dataset_name = dataset_name
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
self.dataset_name))
# Process the files.
string = ' > paths:'
for path in datapaths:
string += ' ' + path
print_rank_0(string)
self.samples = []
for datapath in datapaths:
self.samples.extend(self.process_samples_from_single_path(datapath))
print_rank_0(' >> total number of samples: {}'.format(
len(self.samples)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
raw_sample = self.samples[idx]
ids, types, paddings = build_tokens_types_paddings_from_text(
raw_sample['text_a'], raw_sample['text_b'],
self.tokenizer, self.max_seq_length)
sample = build_sample(ids, types, paddings,
raw_sample['label'], raw_sample['uid'])
return sample
@abstractmethod
def process_samples_from_single_path(self, datapath):
"""Abstract method that takes a single path / filename and
returns a list of dataset samples, each sample being a dict of
{'text_a': string, 'text_b': string, 'label': int, 'uid': int}
"""
pass
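# Illustration (editor's addition, not part of Megatron-LM): a minimal concrete
# subclass, assuming a tab-separated file of "label<TAB>sentence" lines; text_b
# is left as None for single-sentence tasks.
class ToyGLUEDataset(GLUEAbstractDataset):
    def process_samples_from_single_path(self, datapath):
        samples = []
        with open(datapath, encoding='utf-8') as f:
            for uid, line in enumerate(f):
                label, text = line.rstrip('\n').split('\t', 1)
                samples.append({'text_a': text, 'text_b': None,
                                'label': int(label), 'uid': uid})
        return samples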
|
13894b214e3740b6a073540424c5ab59f9c8e8dc
|
8c39ba92cc71ff78242477d3256f6ee3daa872c7
|
/conans/test/integration/package_id/python_requires_package_id_test.py
|
3648dd8cf3faee57c4af70ac3878befd1998ff9e
|
[
"MIT"
] |
permissive
|
conan-io/conan
|
eb4427e534a0edbb1fb06c753d5d9587faaef93c
|
bac455d1329b6744cdc41747354a727c9233179f
|
refs/heads/release/2.0
| 2023-09-03T18:51:54.345761
| 2023-09-03T17:30:43
| 2023-09-03T17:30:43
| 47,190,624
| 7,754
| 1,182
|
MIT
| 2023-09-14T15:16:09
| 2015-12-01T13:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,041
|
py
|
python_requires_package_id_test.py
|
import textwrap
import unittest
from conans.test.utils.tools import TestClient, GenConanfile
from conans.util.files import save
PKG_ID_1 = "47b42eaf657374a3d040394f03961b66c53bda5e"
PKG_ID_2 = "8b7006bf91e5b52cc1ac24a7a4d9c326ee954bb2"
class PythonRequiresPackageIDTest(unittest.TestCase):
def setUp(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("export . --name=tool --version=1.1.1")
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
python_requires ="tool/[*]"
""")
client2 = TestClient(cache_folder=client.cache_folder)
client2.save({"conanfile.py": conanfile})
self.client = client
self.client2 = client2
def test_default(self):
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.1", self.client2.out)
pkg_id = "170e82ef3a6bf0bbcda5033467ab9d7805b11d0b"
self.client2.assert_listed_binary({"pkg/0.1": (pkg_id,
"Build")})
self.client.run("export . --name=tool --version=1.1.2")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.2", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (pkg_id,
"Build")})
# With a minor change, it fires a rebuild
self.client.run("export . --name=tool --version=1.2.0")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.2.0", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": ("5eb1e7ea93fdd67fe3c3b166d240844648ba2b7a",
"Build")})
def test_change_mode_conf(self):
# change the policy in conan.conf
save(self.client2.cache.new_config_path, "core.package_id:default_python_mode=patch_mode")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.1", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (PKG_ID_1,
"Build")})
# with a patch change, new ID
self.client.run("export . --name=tool --version=1.1.2")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.2", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (PKG_ID_2,
"Build")})
def test_unrelated_conf(self):
# change the policy in conan.conf
save(self.client2.cache.new_config_path,
"core.package_id:default_python_mode=unrelated_mode")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.1", self.client2.out)
pkg_id = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
self.client2.assert_listed_binary({"pkg/0.1": (pkg_id,
"Build")})
# with any change, the package id doesn't change
self.client.run("export . --name=tool --version=1.1.2")
self.client2.run("create . --name=pkg --version=0.1 --build missing")
self.assertIn("tool/1.1.2", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (pkg_id,
"Cache")})
def test_change_mode_package_id(self):
# change the policy in package_id
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
python_requires ="tool/[*]"
def package_id(self):
self.info.python_requires.patch_mode()
""")
self.client2.save({"conanfile.py": conanfile})
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.1", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (PKG_ID_1,
"Build")})
# with a patch change, new ID
self.client.run("export . --name=tool --version=1.1.2")
self.client2.run("create . --name=pkg --version=0.1")
self.assertIn("tool/1.1.2", self.client2.out)
self.client2.assert_listed_binary({"pkg/0.1": (PKG_ID_2,
"Build")})
class PythonRequiresForBuildRequiresPackageIDTest(unittest.TestCase):
def test(self):
client = TestClient()
save(client.cache.new_config_path, "core.package_id:default_python_mode=full_version_mode")
client.save({"conanfile.py": GenConanfile()})
client.run("create . --name=tool --version=1.1.1")
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
python_requires ="tool/[>=0.0]"
""")
client2 = TestClient(cache_folder=client.cache_folder)
client2.save({"conanfile.py": conanfile,
"myprofile": "[tool_requires]\ntool/[>=0.0]\n"})
client2.run("create . --name=pkg --version=0.1 -pr=myprofile")
self.assertIn("tool/1.1.1", client2.out)
self.assertIn(f"pkg/0.1: Package '{PKG_ID_1}' created",
client2.out)
client.run("create . --name=tool --version=1.1.2")
client2.run("install --requires=pkg/0.1@ -pr=myprofile", assert_error=True)
self.assertIn(f"ERROR: Missing binary: pkg/0.1:{PKG_ID_2}",
client2.out)
self.assertIn("tool/1.1.2", client2.out)
self.assertNotIn("tool/1.1.1", client2.out)
client2.run("create . --name=pkg --version=0.1 -pr=myprofile")
# self.assertIn("pkg/0.1: Applying build-requirement: tool/1.1.2", client2.out)
self.assertIn(f"pkg/0.1: Package '{PKG_ID_2}' created",
client2.out)
|
785c4e1b10e973b28e2b3629d572577ae4022857
|
e196fe807b2720eb7f08ad9ca914887341bd9b44
|
/src/streamlink/utils/path.py
|
daba580dd4a0240ec40a626124c8a7098da629f5
|
[
"BSD-2-Clause"
] |
permissive
|
streamlink/streamlink
|
ab2ce4a8d71d2abd67f300628f04ce960e7696d0
|
561f7ef854e3ec076e5bd3efb3e7f8efe5df32df
|
refs/heads/master
| 2023-08-29T15:03:17.008502
| 2023-08-26T19:24:39
| 2023-08-27T11:02:30
| 68,402,336
| 9,529
| 1,385
|
BSD-2-Clause
| 2023-09-13T13:37:33
| 2016-09-16T17:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
path.py
|
from pathlib import Path
from shutil import which
from typing import List, Optional, Union
def resolve_executable(
custom: Optional[Union[str, Path]] = None,
names: Optional[List[str]] = None,
fallbacks: Optional[List[Union[str, Path]]] = None,
) -> Optional[Union[str, Path]]:
if custom:
return which(custom)
for item in (names or []) + (fallbacks or []):
executable = which(item)
if executable:
return executable
return None
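# Usage sketch (editor's addition): resolve ffmpeg, falling back from a PATH
# lookup to a hard-coded location; the fallback path is an assumption.
if __name__ == "__main__":
    ffmpeg = resolve_executable(names=["ffmpeg"], fallbacks=["/usr/local/bin/ffmpeg"])
    print(ffmpeg or "ffmpeg not found")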
|
9fcffa6d22cf9430819095180c91525f6943c098
|
23895eba556353a116d97a3e9fa60f7ed9c9f693
|
/Paths/Find Near Vertical Misses.py
|
d58d11239a608e3275849cae202528b4908fe9f8
|
[
"Apache-2.0"
] |
permissive
|
mekkablue/Glyphs-Scripts
|
9970200e6b7223be58ff9122dd519af176f210de
|
fe09b4cf3754bc10c3037c3312a19c1b909a74d6
|
refs/heads/master
| 2023-08-28T15:02:21.931491
| 2023-08-25T17:12:34
| 2023-08-25T17:12:34
| 2,517,418
| 322
| 108
|
Apache-2.0
| 2023-08-15T15:24:50
| 2011-10-05T07:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 21,532
|
py
|
Find Near Vertical Misses.py
|
#MenuTitle: Find Near Vertical Misses
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__ = """
Finds nodes that are close but not exactly on vertical metrics.
"""
import vanilla
class FindNearVerticalMisses(object):
marker = "❌"
heightsToCheck = []
prefID = "com.mekkablue.FindNearVerticalMisses"
prefDict = {
"deviance": "1",
"tolerateIfNextNodeIsOn": True,
"tolerateIfExtremum": True,
"includeHandles": False,
"removeOverlap": False,
"markNodes": False,
"includeNonExporting": False,
"includeComposites": False,
"exclude": False,
"openTab": True,
"reuseTab": True,
"whereToCheck.ascender": True,
"whereToCheck.capHeight": True,
"whereToCheck.shoulderHeight": False,
"whereToCheck.smallCapHeight": False,
"whereToCheck.xHeight": True,
"whereToCheck.baseline": True,
"whereToCheck.descender": True,
}
def __init__(self):
# Window 'self.w':
windowWidth = 320
windowHeight = 510
windowWidthResize = 300 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
(windowWidth, windowHeight), # default window size
"Find Near Vertical Misses", # window title
minSize=(windowWidth, windowHeight), # minimum size (for resizing)
maxSize=(windowWidth + windowWidthResize, windowHeight + windowHeightResize), # maximum size (for resizing)
autosaveName="com.mekkablue.FindNearVerticalMisses.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox((inset, linePos + 2, -inset, 14), "Find glyphs with nodes not exactly on vertical metrics:", sizeStyle='small', selectable=True)
linePos += lineHeight
self.w.devianceText = vanilla.TextBox((inset, linePos + 3, inset + 135, 14), "Find nodes off by up to:", sizeStyle='small', selectable=True)
self.w.deviance = vanilla.EditText((inset + 135, linePos, -inset, 19), "1", callback=self.SavePreferences, sizeStyle='small')
self.w.deviance.getNSTextField().setToolTip_("Finds nodes that are not equal to the metric, but off up to this value in units. Minimum: 1 unit.")
linePos += lineHeight
# BOX
linePos += int(lineHeight // 2)
self.w.whereToCheck = vanilla.Box((inset, linePos, -inset, int(lineHeight * 7.6)))
insetLinePos = int(inset * 0.2)
self.w.whereToCheck.ascender = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "Ascender (caps ignored)", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.ascender.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the ascender of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.capHeight = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "Cap Height (lowercase ignored)", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.capHeight.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the capHeight of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.shoulderHeight = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "shoulderHeight (UC, LC, SC ignored)", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.shoulderHeight.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the shoulderHeight of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.smallCapHeight = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "smallCapHeight (only considers smallcaps)", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.smallCapHeight.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the smallCapHeight of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.xHeight = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "x-height (caps ignored)", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.xHeight.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the xHeight of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.baseline = vanilla.CheckBox((int(0.5 * inset), insetLinePos - 1, -inset, 20), "Baseline", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.whereToCheck.baseline.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the baseline of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
self.w.whereToCheck.descender = vanilla.CheckBox(
(int(0.5 * inset), insetLinePos - 1, -inset, 20), "Descender", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.whereToCheck.descender.getNSButton().setToolTip_("Checks if points are not exactly on, but just off the descender of the corresponding master.")
linePos += lineHeight
insetLinePos += lineHeight
linePos += lineHeight
# BOX END
self.w.tolerateIfNextNodeIsOn = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Tolerate near miss if next node is on", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.tolerateIfNextNodeIsOn.getNSButton().setToolTip_(
"Will skip the just-off node if the next or previous on-curve node is EXACTLY on the metric line. Useful if you have very thin serifs or short segments near the metric lines."
)
linePos += lineHeight
self.w.tolerateIfExtremum = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Tolerate near miss for left/right curve extremum", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.tolerateIfExtremum.getNSButton(
).setToolTip_("Will skip the just-off node if the next and previous nodes are VERTICAL OFF-CURVES. Recommended for avoiding false positives.")
linePos += lineHeight
self.w.includeHandles = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Include off-curve points", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.includeHandles.getNSButton().setToolTip_("Also checks BCPs (Bézier control points), vulgo ‘handles’. Otherwise only considers on-curve nodes")
linePos += lineHeight
self.w.removeOverlap = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Check outlines after Remove Overlap (slower)", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.removeOverlap.getNSButton().setToolTip_(
"Only checks outlines after overlap removal. That way, ignores triangular overlaps (‘opened corners’). Use this option if you have too many false positives."
)
linePos += lineHeight
self.w.markNodes = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), f"Mark affected nodes with {self.marker}", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.markNodes.getNSButton().setToolTip_(
"Sets the name of affected nodes to this emoji, so you can easily find it. ATTENTION: If Remove Overlap option is on, will use the emoji as an annotation instead."
)
linePos += lineHeight
self.w.includeNonExporting = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Include non-exporting glyphs", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.includeNonExporting.getNSButton(
).setToolTip_("Also check for near misses in glyphs that are set to not export. Useful if you are using non-exporting parts as components in other glyphs.")
linePos += lineHeight
self.w.includeComposites = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Include composites", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.includeComposites.getNSButton(
).setToolTip_("If unchecked, will only go through glyphs that have paths in them. Recommended to leave off, because it usually reports a lot of false positives.")
linePos += lineHeight
self.w.excludeText = vanilla.TextBox((inset, linePos + 3, 150, 14), "Exclude glyphs containing:", sizeStyle='small', selectable=True)
self.w.exclude = vanilla.EditText((inset + 150, linePos, -inset, 19), ".ornm, .notdef, comb", callback=self.SavePreferences, sizeStyle='small')
linePos += lineHeight
self.w.openTab = vanilla.CheckBox((inset, linePos - 1, 190, 20), "Open tab with affected layers", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.openTab.getNSButton().setToolTip_(
"If it finds nodes just off the indicated metrics, will open a new tab with the layers if found the deviating nodes on. Otherwise please check the detailed report in Macro Window."
)
self.w.reuseTab = vanilla.CheckBox((inset + 190, linePos - 1, -inset, 20), "Reuse current tab", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.reuseTab.getNSButton().setToolTip_("If a tab is open already, will use that one, rather than opening a new tab. Recommended, keeps tab clutter low.")
linePos += lineHeight
self.w.progress = vanilla.ProgressBar((inset, linePos, -inset, 16))
self.w.progress.set(0) # set progress indicator to zero
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button((-80 - inset, -20 - inset, -inset, -inset), "Find", sizeStyle='regular', callback=self.FindNearVerticalMissesMain)
self.w.setDefaultButton(self.w.runButton)
# Status Message:
self.w.status = vanilla.TextBox((inset, -18 - inset, -80 - inset, 14), "🤖 Ready.", sizeStyle='small', selectable=True)
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Find Near Vertical Misses' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def domain(self, prefName):
prefName = prefName.strip().strip(".")
return self.prefID + "." + prefName.strip()
def pref(self, prefName):
prefDomain = self.domain(prefName)
return Glyphs.defaults[prefDomain]
def uiElement(self, prefName):
particles = prefName.split(".")
latestObject = self.w
for particle in particles:
latestObject = getattr(latestObject, particle)
return latestObject
def SavePreferences(self, sender=None):
try:
# write current settings into prefs:
for prefName in self.prefDict.keys():
Glyphs.defaults[self.domain(prefName)] = self.uiElement(prefName).get()
self.checkGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences(self):
try:
for prefName in self.prefDict.keys():
# register defaults:
Glyphs.registerDefault(self.domain(prefName), self.prefDict[prefName])
# load previously written prefs:
self.uiElement(prefName).set(self.pref(prefName))
self.checkGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def checkGUI(self, sender=None):
# At least one vertical metrics must be on, otherwise disable button:
enableButton = False
boxDict = self.w.whereToCheck.__dict__
for itemName in boxDict:
checkbox = boxDict[itemName]
if type(checkbox) == vanilla.vanillaCheckBox.CheckBox:
if checkbox.get():
enableButton = True
break
self.w.runButton.enable(onOff=enableButton)
# disable Reuse Tab button if Open Tab is off:
self.w.reuseTab.enable(self.w.openTab.get())
def isNodeSlightlyOff(self, nodePosition, master, deviance, prevY, nextY, glyphType=None, glyphSuffix=None):
y = nodePosition.y
prevAndNextDontCount = prevY is None or nextY is None
if self.pref("descender"):
if y != master.descender:
if master.descender - deviance <= y <= master.descender + deviance:
if prevAndNextDontCount or (prevY != master.descender and nextY != master.descender):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("baseline"):
if y != 0.0:
if 0.0 - deviance <= y <= 0.0 + deviance:
if prevAndNextDontCount or (prevY != 0.0 and nextY != 0.0):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("xHeight"):
if glyphType is None or not glyphType in ("Uppercase", "Smallcaps"):
if y != master.xHeight:
if master.xHeight - deviance <= y <= master.xHeight + deviance:
if prevAndNextDontCount or (prevY != master.xHeight and nextY != master.xHeight):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("smallCapHeight"):
suffixIsSC = False
if glyphSuffix:
suffixes = glyphSuffix.split(".") # could be multiple suffixes
for suffix in ("sc", "smcp", "c2sc"):
if suffix in suffixes:
suffixIsSC = True
if suffixIsSC or glyphType == "Smallcaps": # is smallcap
smallCapHeight = master.customParameters["smallCapHeight"]
if smallCapHeight:
smallCapHeight = float(smallCapHeight)
if y != smallCapHeight:
if smallCapHeight - deviance <= y <= smallCapHeight + deviance:
if prevAndNextDontCount or (prevY != smallCapHeight and nextY != smallCapHeight):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("shoulderHeight"):
if glyphType is None or not glyphType in ("Lowercase", "Uppercase", "Smallcaps"):
shoulderHeight = master.customParameters["shoulderHeight"]
if shoulderHeight:
shoulderHeight = float(shoulderHeight)
if y != shoulderHeight:
if shoulderHeight - deviance <= y <= shoulderHeight + deviance:
if prevAndNextDontCount or (prevY != shoulderHeight and nextY != shoulderHeight):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("capHeight"):
if glyphType is None or not glyphType in ("Lowercase", "Smallcaps"):
if y != master.capHeight:
if master.capHeight - deviance <= y <= master.capHeight + deviance:
if prevAndNextDontCount or (prevY != master.capHeight and nextY != master.capHeight):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
if self.pref("ascender"):
if glyphType is None or not glyphType in ("Uppercase", "Smallcaps"):
if y != master.ascender:
if master.ascender - deviance <= y <= master.ascender + deviance:
if prevAndNextDontCount or (prevY != master.ascender and nextY != master.ascender):
return True
else:
# prev or next node is exactly on the metric line, so do not count as off:
return False
return False
def doubleCheckNodeName(self, thisNode):
if thisNode.name == self.marker:
thisNode.name = None
def doubleCheckAnnotations(self, thisLayer):
for i in range(len(thisLayer.annotations))[::-1]:
if thisLayer.annotations[i].text == self.marker:
del thisLayer.annotations[i]
def addAnnotation(self, layer, position, text):
marker = GSAnnotation()
marker.type = TEXT
marker.position = position
marker.text = text
marker.width = min(max(50.0, 7 * len(text)), 600.0) # min=50, max=600
layer.annotations.append(marker)
def FindNearVerticalMissesMain(self, sender):
try:
# clears macro window log:
Glyphs.clearLog()
self.w.progress.set(0)
# update settings to the latest user input:
if not self.SavePreferences(self):
print("Note: 'Find Near Vertical Misses' could not write preferences.")
self.checkGUI()
thisFont = Glyphs.font # frontmost font
print(f"Find Near Vertical Misses Report for {thisFont.familyName}")
print(thisFont.filepath)
print()
includeComposites = self.pref("includeComposites")
includeNonExporting = self.pref("includeNonExporting")
deviance = float(self.pref("deviance"))
excludes = [x.strip() for x in self.pref("exclude").split(",")]
skippedGlyphs = []
affectedLayers = []
totalNumberOfGlyphs = len(thisFont.glyphs)
for i, thisGlyph in enumerate(thisFont.glyphs):
self.w.progress.set(100 * i // totalNumberOfGlyphs)
glyphIsExcluded = not (thisGlyph.export or includeNonExporting)
if not glyphIsExcluded:
for excludedText in excludes:
if excludedText in thisGlyph.name:
skippedGlyphs.append(thisGlyph.name)
glyphIsExcluded = True
break
if not glyphIsExcluded:
self.w.status.set(f"🔠 {thisGlyph.name}")
suffix = None
if "." in thisGlyph.name:
offset = thisGlyph.name.find(".")
suffix = thisGlyph.name[offset:]
for thisLayer in thisGlyph.layers:
# get rid of debris from previous iterations:
self.doubleCheckAnnotations(thisLayer)
layerCounts = thisLayer.isMasterLayer or thisLayer.isSpecialLayer
layerShouldBeChecked = len(thisLayer.paths) > 0 or includeComposites
if layerCounts and layerShouldBeChecked:
# overlap removal if requested:
if self.pref("removeOverlap"):
checkLayer = thisLayer.copyDecomposedLayer()
checkLayer.removeOverlap()
else:
checkLayer = thisLayer
# step through nodes:
for thisPath in checkLayer.paths:
for thisNode in thisPath.nodes:
nodeIsOncurve = thisNode.type != OFFCURVE
if nodeIsOncurve or self.pref("includeHandles"):
skipThisNode = False
if self.pref("tolerateIfExtremum"):
if thisNode.prevNode:
if thisNode.prevNode.type == OFFCURVE and thisNode.nextNode.type == OFFCURVE:
vertical = thisNode.x == thisNode.prevNode.x == thisNode.nextNode.x
linedUp = (thisNode.y - thisNode.prevNode.y) * (thisNode.nextNode.y - thisNode.y) > 0.0
if vertical and linedUp:
skipThisNode = True
else:
print(f"⚠️ Potential open path in {thisGlyph.name}")
if not skipThisNode:
if self.pref("tolerateIfNextNodeIsOn"):
# determine previous oncurve point
previousOnCurve = thisNode.prevNode
if previousOnCurve:
while previousOnCurve.type == OFFCURVE:
previousOnCurve = previousOnCurve.prevNode
previousY = previousOnCurve.y
# determine next oncurve point
nextOnCurve = thisNode.nextNode
while nextOnCurve.type == OFFCURVE:
nextOnCurve = nextOnCurve.nextNode
nextY = nextOnCurve.y
else:
print(f"⚠️ Potential open path in {thisGlyph.name}")
else:
previousY = None
nextY = None
glyphType = None
if Glyphs.versionNumber >= 3:
# GLYPHS 3
if thisGlyph.case == GSUppercase:
glyphType = "Uppercase"
elif thisGlyph.case == GSLowercase:
glyphType = "Lowercase"
elif thisGlyph.case == GSSmallcaps:
glyphType = "Smallcaps"
else:
glyphType = thisGlyph.subCategory
if self.isNodeSlightlyOff(thisNode.position, thisLayer.master, deviance, previousY, nextY, glyphType, suffix):
# collect layer:
if not thisLayer in affectedLayers:
affectedLayers.append(thisLayer)
thisNode.selected = True
# report:
print("%s /%s ‘%s’: %.1f %.1f" % (
self.marker,
thisGlyph.name,
thisLayer.name,
thisNode.x, thisNode.y,
))
# node name:
if self.pref("markNodes"):
if self.pref("removeOverlap"):
self.addAnnotation(thisLayer, thisNode.position, self.marker)
else:
thisNode.name = self.marker
else:
self.doubleCheckNodeName(thisNode)
else:
self.doubleCheckNodeName(thisNode)
else:
self.doubleCheckNodeName(thisNode)
# make sure View options are on:
if self.pref("markNodes"):
if self.pref("removeOverlap"):
Glyphs.defaults["showAnnotations"]=1
else:
Glyphs.defaults["showNodeNames"]=1
# Done. Set Progress Bar to max and report:
self.w.progress.set(100)
self.w.status.set("✅ Done.")
if skippedGlyphs:
print()
print(f"Skipped glyphs:\n{', '.join(skippedGlyphs)}")
print()
print("Done.")
if affectedLayers:
if self.pref("openTab"):
# try to reuse current tab:
resultTab = thisFont.currentTab
if resultTab and self.pref("reuseTab"):
resultTab.layers = ()
else:
# open new tab:
resultTab = thisFont.newTab()
resultTab.layers = affectedLayers
else:
# brings macro window to front:
Glyphs.showMacroWindow()
else:
Message(
title="No Deviant Nodes",
message="Congratulations! No nodes found missing the indicated metrics and off by up to {self.pref('deviance')} u.",
OKButton="🥂Cheers!"
)
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print(f"Find Near Vertical Misses Error: {e}")
import traceback
print(traceback.format_exc())
FindNearVerticalMisses()
|
4982073ff0217be3395b4d90730b3ee7ee87df0d
|
af20739e34a39f0a0a99537ce047f502a3531ea5
|
/tests/extension/test_serverextension.py
|
b0c0fa446e126ed4373f6c7278c70022b9db60d6
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter-server/jupyter_server
|
66cf0ac45804aa5dd6bf8dff7050db02f3696b92
|
93fde1ad9fece22607960184501f5c9c80cd3765
|
refs/heads/main
| 2023-09-04T03:44:41.696097
| 2023-08-31T08:20:36
| 2023-08-31T08:20:36
| 68,849,978
| 237
| 186
|
BSD-3-Clause
| 2023-09-09T02:42:25
| 2016-09-21T19:18:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,680
|
py
|
test_serverextension.py
|
from collections import OrderedDict
import pytest
try:
from jupyter_core.paths import prefer_environment_over_user
except ImportError:
prefer_environment_over_user = None # type:ignore
from traitlets.tests.utils import check_help_all_output
from jupyter_server.config_manager import BaseJSONConfigManager
from jupyter_server.extension.serverextension import (
DisableServerExtensionApp,
ListServerExtensionsApp,
ServerExtensionApp,
ToggleServerExtensionApp,
_get_config_dir,
toggle_server_extension_python,
)
# Use ServerApps environment because it monkeypatches
# jupyter_core.paths and provides a config directory
# that does not cross-contaminate the user config directory.
pytestmark = pytest.mark.usefixtures("jp_environ")
def test_help_output():
check_help_all_output("jupyter_server.extension.serverextension")
check_help_all_output("jupyter_server.extension.serverextension", ["enable"])
check_help_all_output("jupyter_server.extension.serverextension", ["disable"])
check_help_all_output("jupyter_server.extension.serverextension", ["install"])
check_help_all_output("jupyter_server.extension.serverextension", ["uninstall"])
def get_config(sys_prefix=True):
cm = BaseJSONConfigManager(config_dir=_get_config_dir(sys_prefix=sys_prefix))
data = cm.get("jupyter_server_config")
return data.get("ServerApp", {}).get("jpserver_extensions", {})
def test_enable(jp_env_config_path, jp_extension_environ):
toggle_server_extension_python("mock1", True)
config = get_config()
assert config["mock1"]
def test_disable(jp_env_config_path, jp_extension_environ):
toggle_server_extension_python("mock1", True)
toggle_server_extension_python("mock1", False)
config = get_config()
assert not config["mock1"]
@pytest.mark.skipif(prefer_environment_over_user is None, reason="Requires jupyter_core 5.0+")
def test_merge_config(jp_env_config_path, jp_configurable_serverapp, jp_extension_environ):
# Toggle each extension module with a JSON config file
# at the sys-prefix config dir.
toggle_server_extension_python(
"tests.extension.mockextensions.mockext_sys",
enabled=True,
sys_prefix=True,
)
toggle_server_extension_python(
"tests.extension.mockextensions.mockext_user",
enabled=True,
user=True,
)
# Write this configuration in two places, sys-prefix and user.
    # sys-prefix supersedes user config, so the extension should be disabled
# when these two configs merge.
toggle_server_extension_python(
"tests.extension.mockextensions.mockext_both",
enabled=False,
sys_prefix=True,
)
toggle_server_extension_python(
"tests.extension.mockextensions.mockext_both",
enabled=True,
user=True,
)
mockext_py = "tests.extension.mockextensions.mockext_py"
argv = ["--ServerApp.jpserver_extensions", f"{mockext_py}=True"]
# Enable the last extension, mockext_py, using the CLI interface.
app = jp_configurable_serverapp(config_dir=str(jp_env_config_path), argv=argv)
# Verify that extensions are enabled and merged in proper order.
extensions = app.jpserver_extensions
assert extensions["tests.extension.mockextensions.mockext_user"]
assert extensions["tests.extension.mockextensions.mockext_sys"]
assert extensions["tests.extension.mockextensions.mockext_py"]
    # Merging should cause this extension to be disabled.
if prefer_environment_over_user():
assert not extensions["tests.extension.mockextensions.mockext_both"]
@pytest.mark.parametrize(
"jp_server_config",
[
{
"ServerApp": {
"jpserver_extensions": OrderedDict(
[
("tests.extension.mockextensions.mock2", True),
("tests.extension.mockextensions.mock1", True),
]
)
}
}
],
)
def test_load_ordered(jp_serverapp, jp_server_config):
assert jp_serverapp.mockII is True, "Mock II should have been loaded"
assert jp_serverapp.mockI is True, "Mock I should have been loaded"
assert jp_serverapp.mock_shared == "II", "Mock II should be loaded after Mock I"
def test_server_extension_apps(jp_env_config_path, jp_extension_environ):
app = ToggleServerExtensionApp()
app.extra_args = ["mock1"]
app.start()
app2 = DisableServerExtensionApp()
app2.extra_args = ["mock1"]
app2.start()
app3 = ListServerExtensionsApp()
app3.start()
def test_server_extension_app():
app = ServerExtensionApp()
app.launch_instance(["list"])
|
fc7e33251ad2b3f08da50291d2fb65d5600cd7f6
|
e8846f706a428a91659ac6e24974dc696089fe4a
|
/pandapower/test/timeseries/test_data_source.py
|
46d7a690d721c8745f9ee4ecaeff7b94bdf1a8de
|
[
"BSD-3-Clause"
] |
permissive
|
e2nIEE/pandapower
|
3e434bf81b29e9c88905abbd82fd0309e2191ffb
|
5592ba1f6fcd727053a37dcf246b9bf36874c24a
|
refs/heads/develop
| 2023-09-03T23:21:25.979973
| 2023-08-31T11:00:17
| 2023-08-31T11:00:17
| 78,748,060
| 608
| 481
|
NOASSERTION
| 2023-09-14T18:22:08
| 2017-01-12T13:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
test_data_source.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import os
import pandas as pd
import pytest
import copy
import pandapower.control
import pandapower.timeseries
from pandapower import pp_dir
epsilon = 0.00000000000001
def test_data_source():
"""
Testing simply reading from file and checking the data.
"""
# load file
filename = os.path.join(pp_dir, "test", "timeseries", "test_files", "small_profile.csv")
df = pd.read_csv(filename, sep=";")
my_data_source = pandapower.timeseries.DFData(df)
copy.deepcopy(my_data_source)
assert my_data_source.get_time_step_value(time_step=0, profile_name="my_profilename") == 0.0
assert my_data_source.get_time_step_value(time_step=3, profile_name="my_profilename") == 0.0
assert abs(my_data_source.get_time_step_value(time_step=4, profile_name="my_profilename")
- -3.97E-1) < epsilon
assert abs(my_data_source.get_time_step_value(time_step=8, profile_name="constload3")
- -5.37E-3) < epsilon
if __name__ == '__main__':
pytest.main(['-x', '-s', __file__])
# pytest.main(['-x', __file__])
|
6cf79b9ae3f5000310893309f9d3140c4e6c3780
|
fbbffcc0c0d689b0bca759c37399374c3772f922
|
/weChatApi/testwechatsendfilemessage.py
|
0a834fa09b14d404ad4568e0f81507bf40d0b529
|
[] |
no_license
|
holdyeah/wechat-pc-hook-python
|
147872ce9818541e367319ea63eba40ddbb90ad5
|
763a882de5c8aa4f9bb71855a74e02ee66a9e563
|
refs/heads/master
| 2023-03-17T06:17:30.988003
| 2023-03-04T08:44:37
| 2023-03-04T08:44:37
| 204,273,242
| 284
| 86
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
testwechatsendfilemessage.py
|
import requests
import urllib.parse
url = "http://127.0.0.1:2020/file"
test_str = "D:/2.mp4"
test22 = urllib.parse.quote(test_str)
payload = 'wxid=filehelper&msg=' + test22
print(payload)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(url, headers=headers, data=payload)
print(response.text)
|
5f30588fbd449735adae1498b9a396eb12a3bbec
|
6f54ba22cf13c8168ecdae96bce068bcc74c1ecc
|
/uatest/reconnection_server.py
|
b41efa817d2bfce163644794a420014b20fe833d
|
[
"MIT"
] |
permissive
|
gopcua/opcua
|
b318aeea308cb41de1fdb2ea713e747a78ad577d
|
10b83a76e2e7eebbdf9417f20f479594da5e5e76
|
refs/heads/main
| 2023-08-31T09:32:47.680706
| 2023-08-29T12:19:53
| 2023-08-29T12:19:53
| 142,013,463
| 693
| 236
|
MIT
| 2023-09-05T09:20:41
| 2018-07-23T12:48:35
|
Go
|
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
reconnection_server.py
|
#!/usr/bin/env python3
from opcua import ua, Server
from opcua.common.callback import CallbackType
from opcua.ua.ua_binary import struct_to_binary, header_to_binary
server = None
# HACK: opcua does not support sending error message
def send_error_message(status_code, reason="test bench simulation"):
# take the first transport they all have the same processor
transport = server.iserver.asyncio_transports[0]
processor = transport._protocol.processor
response = ua.ErrorMessage()
response.Error = status_code
response.Reason = reason
processor.socket.write(error_message_to_binary(response))
# HACK: opcua does not support sending error message
def error_message_to_binary(message):
header = ua.Header(ua.MessageType.Error, ua.ChunkType.Single)
body = struct_to_binary(message)
header.body_size = len(body)
return header_to_binary(header) + body
# On connection_lost the server remove the securechannel and the session
def simulate_connection_failure(parent):
def close_connection():
for transport in server.iserver.asyncio_transports:
transport.close()
server.iserver.loop.call_soon(close_connection)
return []
def simulate_securechannel_failure(parent):
server.iserver.loop.call_soon(lambda : send_error_message(ua.StatusCode(ua.StatusCodes.BadSecureChannelIdInvalid)))
return []
def simulate_session_failure(parent):
server.iserver.loop.call_soon(lambda : send_error_message(ua.StatusCode(ua.StatusCodes.BadSessionIdInvalid)))
return []
def simulate_subscription_failure(parent):
server.iserver.loop.call_soon(lambda : send_error_message(ua.StatusCode(ua.StatusCodes.BadSubscriptionIdInvalid)))
return []
if __name__ == "__main__":
server = Server()
server.set_endpoint("opc.tcp://0.0.0.0:4840/")
ns = server.register_namespace("http://gopcua.com/")
simulations = server.nodes.objects.add_object(ua.NodeId("simulations", ns), "simulations")
fnEven = simulations.add_method(ua.NodeId("simulate_connection_failure", ns), "simulate_connection_failure", simulate_connection_failure, [], [])
fnEven = simulations.add_method(ua.NodeId("simulate_securechannel_failure", ns), "simulate_securechannel_failure", simulate_securechannel_failure, [], [])
fnEven = simulations.add_method(ua.NodeId("simulate_session_failure", ns), "simulate_session_failure", simulate_session_failure, [], [])
fnEven = simulations.add_method(ua.NodeId("simulate_subscription_failure", ns), "simulate_subscription_failure", simulate_subscription_failure, [], [])
server.start()
|
fd7890609838f0a3601716f0bebb0dea1fe526d3
|
028ddc5e85d89c26f8320b70d8ffe80f3d5aec52
|
/src/UQpy/surrogates/polynomial_chaos/polynomials/PolynomialsND.py
|
31e6a2b5b019efff44463619cfba95f970cdb0a9
|
[
"MIT"
] |
permissive
|
SURGroup/UQpy
|
3b516706e9072c6fac80da0bdfbd23e2193f5844
|
9e98a6279aa5a2ec2d6d4c61226c34712547bcc6
|
refs/heads/master
| 2023-09-04T03:38:35.294389
| 2023-08-04T12:55:02
| 2023-08-04T12:55:02
| 112,795,497
| 215
| 70
|
MIT
| 2023-09-14T14:18:22
| 2017-12-01T23:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,868
|
py
|
PolynomialsND.py
|
import numpy as np
from UQpy.surrogates.polynomial_chaos.polynomials.baseclass.Polynomials import Polynomials
class PolynomialsND(Polynomials):
def __init__(self, distributions, multi_index):
"""
Class for multivariate Wiener-Askey chaos polynomials.
:param distributions: Joint probability distribution.
:param multi_index: Polynomial multi-degree (multi-index).
"""
self.multi_index = multi_index
self.distributions = distributions
marginals = distributions.marginals
N = len(multi_index) # dimensions
self.polynomials1d = [Polynomials.distribution_to_polynomial[type(marginals[n])]
(distributions=marginals[n], degree=int(multi_index[n])) for n in range(N)]
    def evaluate(self, eval_data) -> np.ndarray:
"""
Evaluate Nd chaos polynomial on the given data set.
:param eval_data: Points upon which the ND chaos polynomial will be evaluated.
:return: Evaluations of the ND chaos polynomial.
"""
try: # case: 2d array, K x N, N being the number of dimensions
K, N = np.shape(eval_data)
        except ValueError:  # case: 1d array, 1 x N, N being the number of dimensions
K = 1
N = len(eval_data)
eval_data = eval_data.reshape(K, N)
# Store evaluations of 1d polynomials in a KxN matrix. Each column has
# the evaluations of the n-th 1d polynomial on the n-th data column,
# i.e. on the values of the n-th parameter
eval_matrix = np.empty([K, N])
for n in range(N):
eval_matrix[:, n] = self.polynomials1d[n].evaluate(eval_data[:, n])
# The output of the multivariate polynomial is the product of the
# outputs of the corresponding 1d polynomials
return np.prod(eval_matrix, axis=1)
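# A hedged illustration of the product rule above (the lambda "polynomials"
# are stand-ins, not UQpy objects): on a 2-dimensional data set, each point's
# output is the product of its per-dimension 1d evaluations.
#
#   poly1d = [lambda x: x, lambda x: 3 * x ** 2 - 1]
#   data = np.array([[0.5, 0.2], [1.0, 0.4]])          # K=2 points, N=2 dims
#   evals = np.column_stack([poly1d[n](data[:, n]) for n in range(2)])
#   np.prod(evals, axis=1)                             # array([-0.44, -0.52])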
|
a2a3d4566a563a2654c71a2a872bf537c85604bd
|
9f8bfd5e566a6e69beb9d3f695ee048a63237f79
|
/nornir/core/plugins/register.py
|
f28553d69e12a4b5d990dd6a261127db7f998d07
|
[
"Apache-2.0"
] |
permissive
|
nornir-automation/nornir
|
4ae96909d06210e2493113d6f42f8ed6a91bbe6f
|
719b57c770aa72127578a13bbc3162f0b797e54a
|
refs/heads/main
| 2023-08-30T10:30:42.075614
| 2023-08-30T07:39:32
| 2023-08-30T07:39:32
| 76,636,028
| 1,206
| 279
|
Apache-2.0
| 2023-08-30T07:39:34
| 2016-12-16T08:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,334
|
py
|
register.py
|
import sys
from typing import Dict, TypeVar, Generic
from nornir.core.exceptions import (
PluginAlreadyRegistered,
PluginNotRegistered,
)
if sys.version_info >= (3, 10):
from importlib import metadata
else:
import importlib_metadata as metadata
T = TypeVar("T")
class PluginRegister(Generic[T]):
available: Dict[str, T] = {}
def __init__(self, entry_point: str) -> None:
self._entry_point = entry_point
def auto_register(self) -> None:
for entry_point in metadata.entry_points(group=self._entry_point):
self.register(entry_point.name, entry_point.load())
def register(self, name: str, plugin: T) -> None:
"""Registers a plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: plugin class
Raises:
:obj:`nornir.core.exceptions.PluginAlreadyRegistered` if
another plugin with the specified name was already registered
"""
existing_plugin = self.available.get(name)
if existing_plugin is None:
self.available[name] = plugin
elif existing_plugin != plugin:
raise PluginAlreadyRegistered(
f"plugin {plugin} can't be registered as "
f"{name!r} because plugin {existing_plugin} "
f"was already registered under this name"
)
def deregister(self, name: str) -> None:
"""Deregisters a registered plugin by its name
Args:
name: name of the plugin to deregister
Raises:
:obj:`nornir.core.exceptions.PluginNotRegistered`
"""
if name not in self.available:
raise PluginNotRegistered(f"plugin {name!r} is not registered")
self.available.pop(name)
def deregister_all(self) -> None:
"""Deregisters all registered plugins"""
self.available = {}
def get_plugin(self, name: str) -> T:
"""Fetches the plugin by name if already registered
Args:
name: name of the plugin
Raises:
:obj:`nornir.core.exceptions.PluginNotRegistered`
"""
if name not in self.available:
raise PluginNotRegistered(f"plugin {name!r} is not registered")
return self.available[name]
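# A minimal usage sketch (assumptions: the "dummy" entry-point group name and
# the DummyPlugin class are placeholders, not part of nornir itself). Note
# that, as written here, `available` is a class-level dict, so registrations
# are visible to every PluginRegister instance.
#
#   class DummyPlugin:
#       pass
#
#   registry: PluginRegister[type] = PluginRegister("dummy")
#   registry.register("dummy", DummyPlugin)
#   assert registry.get_plugin("dummy") is DummyPlugin
#   registry.deregister("dummy")  # get_plugin("dummy") would now raise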
|
d4f623d9d9c8cf5367d396381ca7bfe27e72f724
|
3782e25b6db35d82d63bb81e398deab85ef2236e
|
/Autocoders/Python/src/fprime_ac/utils/DictTypeConverter.py
|
e757226a7b8a5f1b99887ea8387b40673215cd49
|
[
"Apache-2.0"
] |
permissive
|
nasa/fprime
|
e0c8d45dfc0ff08b5ef6c42a31f47430ba92c956
|
a56426adbb888ce4f5a8c6a2be3071a25b11da16
|
refs/heads/devel
| 2023-09-03T15:10:33.578646
| 2023-08-29T15:39:59
| 2023-08-29T15:39:59
| 95,114,723
| 10,071
| 1,426
|
Apache-2.0
| 2023-09-08T14:31:00
| 2017-06-22T12:45:27
|
C++
|
UTF-8
|
Python
| false
| false
| 2,804
|
py
|
DictTypeConverter.py
|
import sys
class DictTypeConverter:
def __init__(self):
pass
def convert(self, t, size):
# check for various type variations
type_string = ""
type_name = t
ser_import = None
use_size = not size is None
# check for enums
if isinstance(t, tuple):
# extract enumeration arguments
# to match the C rules, we have to start
# counting member values from 0 or the
# last member value
curr_memb_val = 0
# make sure it's an enum
if t[0][0].upper() != "ENUM":
print("ERROR: Expected ENUM type in channel args list...")
sys.exit(-1)
type_string += 'EnumType("' + t[0][1] + '",{'
for (mname, mval, mcomment) in t[1]:
# check for member value
if mval is not None:
curr_memb_val = int(mval)
type_string += '"%s":%d,' % (mname, curr_memb_val)
curr_memb_val += 1
type_string += "})"
type_name = "enum"
# otherwise, lookup type translation in table
elif t == "string":
use_size = False
type_string += "StringType(max_string_len=%s)" % size
else:
type_lookup = {
"U8": "U8Type()",
"I8": "I8Type()",
"I16": "I16Type()",
"U16": "U16Type()",
"I32": "I32Type()",
"U32": "U32Type()",
"I64": "I64Type()",
"U64": "U64Type()",
"F32": "F32Type()",
"F64": "F64Type()",
"bool": "BoolType()",
}
if t in type_lookup:
type_string += type_lookup[t]
else: # set up serializable imports
# Path to serializable is going to be the namespace.type
ser_type = t.split("::")
type_string += "{}.{}()".format(".".join(ser_type), ser_type[-1])
ser_import = ".".join(ser_type)
return (type_string, ser_import, type_name, use_size)
def format_replace(self, format_string, spec_num, old, new):
"""
Search the format specifier string and replace tokens
Mainly a special case to handle enumerations. Software
needs "%d", while Gse needs "%s"
spec_num = instance of token (0..n)
"""
# split into list
flist = format_string.split("%")
# make sure we're not looking past the list
if spec_num + 1 >= len(flist):
return None
# replace token
flist[spec_num + 1] = flist[spec_num + 1].replace(old, new, 1)
# rejoin string
return "%".join(flist)
|
9d4ad2c5918a03dec5538585b880351eeb33e16c
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/Decorators__examples/hello_world.py
|
38ae2dfa60df4644e2f546afa1941e8c63362aa9
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 738
|
py
|
hello_world.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import functools
def makebold(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return "<b>" + func(*args, **kwargs) + "</b>"
return wrapped
def makeitalic(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return "<i>" + func(*args, **kwargs) + "</i>"
return wrapped
def upper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs).upper()
return wrapped
@makebold
@makeitalic
@upper
def hello(text):
return text
print(hello("Hello World!"))
# <b><i>HELLO WORLD!</i></b>
assert hello("Hello World!") == "<b><i>HELLO WORLD!</i></b>"
|
1a5e21c52a3374d47c21da28dc081385444fbf6f
|
46732d613208ee4096fbbd3fd74f22146471d1ce
|
/wechat-friends_info/weixin朋友城市.py
|
d5629d3bc30dd56f76a76b99abb87a0b34b2b7e0
|
[] |
no_license
|
cassieeric/python_crawler
|
7cb02f612382801ae024e2cee70e0c2bcdba927c
|
6d2b4db3d34183d729f6fd30555c6d6f04514260
|
refs/heads/master
| 2022-11-30T20:30:50.031960
| 2022-11-27T02:53:22
| 2022-11-27T02:53:22
| 118,204,154
| 322
| 283
| null | 2022-12-21T09:33:08
| 2018-01-20T03:17:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 440
|
py
|
weixin朋友城市.py
|
# -*- coding: utf-8 -*-
import itchat
import pandas as pd
def get_info():
itchat.login()
friends = itchat.get_friends(update=True)
df_friends = pd.DataFrame(friends)
City = df_friends.City
City_count = City.value_counts()
    # Some friends have no location info; filter those entries out.
City_count = City_count[City_count.index != '']
print(City_count)
# After running the script, scan the QR code to log in.
get_info()
|
b349db98ad26231abf0fd65f1c22a97668220688
|
2c6e9374fe1216a99d833354213ca676407e1197
|
/mqtt_as/mqtt_as_timeout.py
|
8927ace56289f52cc086542de32292ab7a1224ab
|
[
"MIT"
] |
permissive
|
peterhinch/micropython-mqtt
|
7729d2b00ad2e41440fe278a0aec938537edebf8
|
041b0213caee0d990cb0eb454b9e627bf515edde
|
refs/heads/master
| 2023-09-01T08:37:26.372051
| 2023-08-24T09:52:42
| 2023-08-24T09:52:42
| 94,690,722
| 456
| 118
|
MIT
| 2023-02-03T14:54:07
| 2017-06-18T14:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
mqtt_as_timeout.py
|
# mqtt_as_timeout.py Implementation of a timeout on publication.
# (C) Copyright 2019 Kevin Köck.
# Released under the MIT licence.
# This solution detects the case where a publication is delayed by lack of
# connectivity and cancels it if the delay exceeds a timeout.
# Note that it blocks other attempts at publication while waiting for a PUBACK,
# counter to the normal operation of the module but uses less RAM than the
# implementation with concurrent operations.
# The occurrence of a timeout does not guarantee non-reception of the message:
# connectivity loss may occur between reception by the broker and reception of
# CONNACK by the client. However in this case the message would be received in
# a timely fashion.
from mqtt_as import MQTTClient as _MQTTClient
import time
import uasyncio as asyncio
class MQTTClient(_MQTTClient):
_pub_task = None
# Await broker connection. Subclassed to reduce canceling time from 1s to 50ms
async def _connection(self):
while not self._isconnected:
await asyncio.sleep_ms(50)
async def _publishTimeout(self, topic, msg, retain, qos):
try:
await super().publish(topic, msg, retain, qos)
finally:
self._pub_task = None
async def publish(self, topic, msg, retain=False, qos=0, timeout=None):
task = None
start = time.ticks_ms()
while timeout is None or time.ticks_diff(time.ticks_ms(), start) < timeout:
# Can't use wait_for because cancelling a wait_for would cancel _publishTimeout
# Also a timeout in wait_for would cancel _publishTimeout without waiting for
# the socket lock to be available, breaking mqtt protocol.
if self._pub_task is None and task is None:
task = asyncio.create_task(self._publishTimeout(topic, msg, retain, qos))
self._pub_task = task
elif task is not None:
if self._pub_task != task:
return # published
await asyncio.sleep_ms(20)
if task is not None:
async with self.lock:
task.cancel()
return
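# Hedged usage sketch (the broker config, topic and payload are placeholders;
# `config` is the usual mqtt_as configuration dict, not shown here):
#
#   async def main():
#       client = MQTTClient(config)
#       await client.connect()
#       # Returns once the QoS 1 PUBACK arrives, or gives up after ~5000 ms.
#       await client.publish("test/topic", "payload", qos=1, timeout=5000)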
|
d2f8fd03512c7f457e3ec452968ff07411fae9da
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/win/gyptest-asm-files.py
|
007b52eb267688b6827356620a8f2a885bf29e20
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 722
|
py
|
gyptest-asm-files.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure .s files aren't passed to cl.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'asm-files'
test.run_gyp('asm-files.gyp', chdir=CHDIR)
# The compiler will error out if it's passed the .s files, so just make sure
# the build succeeds. The compiler doesn't directly support building
  # assembler files on Windows; they have to be built explicitly with a
# third-party tool.
test.build('asm-files.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
b4c7d34e7a25100f6cc72b599a28f27178c51a4a
|
43c870b240a807db330a7abf53758c1fef924432
|
/cloudkitty/api/v1/__init__.py
|
1f1cfcb74b06e4d7ac7a3f3d3cf2968268ae879f
|
[
"Apache-2.0"
] |
permissive
|
openstack/cloudkitty
|
fb3cb83d8d89b27efe64768862f07781a2ef3b5a
|
94630b97cd1fb4bdd9a638070ffbbe3625de8aa2
|
refs/heads/master
| 2023-08-31T15:10:44.524252
| 2023-08-23T16:27:35
| 2023-08-23T20:01:01
| 20,042,606
| 103
| 57
|
Apache-2.0
| 2017-11-29T15:39:28
| 2014-05-22T00:52:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
__init__.py
|
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
import pecan
from cloudkitty.api.v1 import config as api_config
from cloudkitty.api.v1 import hooks
from cloudkitty import storage
api_opts = [
cfg.BoolOpt('pecan_debug',
default=False,
help='Toggle Pecan Debug Middleware.'),
]
CONF = cfg.CONF
CONF.register_opts(api_opts, group='api')
def get_pecan_config():
# Set up the pecan configuration
filename = api_config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def get_api_app():
app_conf = get_pecan_config()
storage_backend = storage.get_storage()
app_hooks = [
hooks.RPCHook(),
hooks.StorageHook(storage_backend),
hooks.ContextHook(),
]
return pecan.make_app(
app_conf.app.root,
static_root=app_conf.app.static_root,
template_path=app_conf.app.template_path,
debug=CONF.api.pecan_debug,
force_canonical=getattr(app_conf.app, 'force_canonical', True),
hooks=app_hooks,
guess_content_type_from_ext=False,
)
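# A hedged usage note (the server module and port are illustrative, not part
# of cloudkitty): get_api_app() returns a plain WSGI application, so it can
# be served by any WSGI server, e.g.:
#
#   from wsgiref.simple_server import make_server
#   make_server('127.0.0.1', 8080, get_api_app()).serve_forever()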
|
b86d091d5840e02a11baafdd3c70f06fd51428d6
|
bbfc9f05efefe29b6ce9832bb3506efb900c1c93
|
/tests/integration/cvm/test_describe_instances_common_client.py
|
4b46898144887b42b0e3e46aa6681c12a8d9781e
|
[
"Apache-2.0"
] |
permissive
|
TencentCloud/tencentcloud-sdk-python
|
a2fab235926b0a27e9cfdf55e085a8bb15b3f506
|
6baf00a5a56ba58b6a1123423e0a1422d17a0201
|
refs/heads/master
| 2023-09-04T10:52:28.060438
| 2023-09-01T03:09:16
| 2023-09-01T03:09:16
| 130,147,399
| 594
| 300
|
Apache-2.0
| 2023-09-06T07:03:24
| 2018-04-19T02:23:12
|
Python
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
test_describe_instances_common_client.py
|
# -*- coding: utf8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.common_client import CommonClient
def test_describe_instances():
cred = credential.Credential(
os.environ.get("TENCENTCLOUD_SECRET_ID"),
os.environ.get("TENCENTCLOUD_SECRET_KEY"))
client = CommonClient("cvm", '2017-03-12', cred, "ap-shanghai")
rsp = client.call_json("DescribeInstances", {"Limit": 10})
assert rsp["Response"]["TotalCount"] <= 10
|
834ab86782541177a9362fa79ae2658df2240153
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_generated/models/_azure_blob_storage_enums.py
|
2df7b1ad6219c5d1b4156c9741e71bf46f189e43
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 12,339
|
py
|
_azure_blob_storage_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
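# Illustration of the metaclass above (not part of the generated code):
# member lookup is case-insensitive for the enums defined below, e.g.
#
#   AccessTier["hot"] is AccessTier.HOT  # True: __getitem__ upper-cases the key
#   AccessTier.hot is AccessTier.HOT     # True: __getattr__ falls back the same way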
class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
P4 = "P4"
P6 = "P6"
P10 = "P10"
P15 = "P15"
P20 = "P20"
P30 = "P30"
P40 = "P40"
P50 = "P50"
P60 = "P60"
P70 = "P70"
P80 = "P80"
HOT = "Hot"
COOL = "Cool"
ARCHIVE = "Archive"
class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
P4 = "P4"
P6 = "P6"
P10 = "P10"
P15 = "P15"
P20 = "P20"
P30 = "P30"
P40 = "P40"
P50 = "P50"
P60 = "P60"
P70 = "P70"
P80 = "P80"
HOT = "Hot"
COOL = "Cool"
ARCHIVE = "Archive"
class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
P4 = "P4"
P6 = "P6"
P10 = "P10"
P15 = "P15"
P20 = "P20"
P30 = "P30"
P40 = "P40"
P50 = "P50"
P60 = "P60"
P70 = "P70"
P80 = "P80"
HOT = "Hot"
COOL = "Cool"
ARCHIVE = "Archive"
class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
STORAGE = "Storage"
BLOB_STORAGE = "BlobStorage"
STORAGE_V2 = "StorageV2"
FILE_STORAGE = "FileStorage"
BLOCK_BLOB_STORAGE = "BlockBlobStorage"
class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot"
REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool"
class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NEVER_EXPIRE = "NeverExpire"
RELATIVE_TO_CREATION = "RelativeToCreation"
RELATIVE_TO_NOW = "RelativeToNow"
ABSOLUTE = "Absolute"
class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
BLOCK_BLOB = "BlockBlob"
PAGE_BLOB = "PageBlob"
APPEND_BLOB = "AppendBlob"
class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
COMMITTED = "committed"
UNCOMMITTED = "uncommitted"
ALL = "all"
class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
PENDING = "pending"
SUCCESS = "success"
ABORTED = "aborted"
FAILED = "failed"
class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
INCLUDE = "include"
ONLY = "only"
class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the secondary location
"""
LIVE = "live"
BOOTSTRAP = "bootstrap"
UNAVAILABLE = "unavailable"
class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
INFINITE = "infinite"
FIXED = "fixed"
class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AVAILABLE = "available"
LEASED = "leased"
EXPIRED = "expired"
BREAKING = "breaking"
BROKEN = "broken"
class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
LOCKED = "locked"
UNLOCKED = "unlocked"
class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
COPY = "copy"
DELETED = "deleted"
METADATA = "metadata"
SNAPSHOTS = "snapshots"
UNCOMMITTEDBLOBS = "uncommittedblobs"
VERSIONS = "versions"
TAGS = "tags"
class ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
METADATA = "metadata"
DELETED = "deleted"
class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
LEGACY = "legacy"
POSIX = "posix"
class PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
P4 = "P4"
P6 = "P6"
P10 = "P10"
P15 = "P15"
P20 = "P20"
P30 = "P30"
P40 = "P40"
P50 = "P50"
P60 = "P60"
P70 = "P70"
P80 = "P80"
class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
CONTAINER = "container"
BLOB = "blob"
class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The quick query format type.
"""
DELIMITED = "delimited"
JSON = "json"
ARROW = "arrow"
class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""If an object is in rehydrate pending state then this header is returned with priority of
rehydrate. Valid values are High and Standard.
"""
HIGH = "High"
STANDARD = "Standard"
class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MAX = "max"
UPDATE = "update"
INCREMENT = "increment"
class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
STANDARD_LRS = "Standard_LRS"
STANDARD_GRS = "Standard_GRS"
STANDARD_RAGRS = "Standard_RAGRS"
STANDARD_ZRS = "Standard_ZRS"
PREMIUM_LRS = "Premium_LRS"
class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Error codes returned by the service
"""
ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
ACCOUNT_BEING_CREATED = "AccountBeingCreated"
ACCOUNT_IS_DISABLED = "AccountIsDisabled"
AUTHENTICATION_FAILED = "AuthenticationFailed"
AUTHORIZATION_FAILURE = "AuthorizationFailure"
CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
CONDITION_NOT_MET = "ConditionNotMet"
EMPTY_METADATA_KEY = "EmptyMetadataKey"
INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
INTERNAL_ERROR = "InternalError"
INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
INVALID_HEADER_VALUE = "InvalidHeaderValue"
INVALID_HTTP_VERB = "InvalidHttpVerb"
INVALID_INPUT = "InvalidInput"
INVALID_MD5 = "InvalidMd5"
INVALID_METADATA = "InvalidMetadata"
INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
INVALID_RANGE = "InvalidRange"
INVALID_RESOURCE_NAME = "InvalidResourceName"
INVALID_URI = "InvalidUri"
INVALID_XML_DOCUMENT = "InvalidXmlDocument"
INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
MD5_MISMATCH = "Md5Mismatch"
METADATA_TOO_LARGE = "MetadataTooLarge"
MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
OPERATION_TIMED_OUT = "OperationTimedOut"
OUT_OF_RANGE_INPUT = "OutOfRangeInput"
OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
RESOURCE_NOT_FOUND = "ResourceNotFound"
SERVER_BUSY = "ServerBusy"
UNSUPPORTED_HEADER = "UnsupportedHeader"
UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy"
BLOB_NOT_FOUND = "BlobNotFound"
BLOB_OVERWRITTEN = "BlobOverwritten"
BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
BLOCK_LIST_TOO_LONG = "BlockListTooLong"
CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
CONTAINER_DISABLED = "ContainerDisabled"
CONTAINER_NOT_FOUND = "ContainerNotFound"
CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
COPY_ID_MISMATCH = "CopyIdMismatch"
FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
INVALID_BLOB_TIER = "InvalidBlobTier"
INVALID_BLOB_TYPE = "InvalidBlobType"
INVALID_BLOCK_ID = "InvalidBlockId"
INVALID_BLOCK_LIST = "InvalidBlockList"
INVALID_OPERATION = "InvalidOperation"
INVALID_PAGE_RANGE = "InvalidPageRange"
INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
LEASE_ID_MISSING = "LeaseIdMissing"
LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
LEASE_LOST = "LeaseLost"
LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
PENDING_COPY_OPERATION = "PendingCopyOperation"
PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded"
SNAPSHOTS_PRESENT = "SnapshotsPresent"
SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
SYSTEM_IN_USE = "SystemInUse"
TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
BLOB_ARCHIVED = "BlobArchived"
BLOB_NOT_ARCHIVED = "BlobNotArchived"
AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch"
AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch"
AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch"
AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch"
AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch"
|
77d5c6cfaab90084443347a86e80d377ce912af9
|
1228750f9b95c5c2fb9a1d5cb339275db979356b
|
/petridish/data/misc.py
|
7d0300014373746a9be4932f5560a3fb1acf8ca1
|
[
"MIT"
] |
permissive
|
microsoft/petridishnn
|
be0236b9385c7523ca71cfd171f95beaca5d851a
|
0e0431a56db893ef8ee14501f12bf7046d4d6024
|
refs/heads/master
| 2023-06-29T20:58:01.980267
| 2023-06-12T18:22:32
| 2023-06-12T18:22:32
| 180,651,701
| 123
| 24
|
MIT
| 2023-06-12T18:22:33
| 2019-04-10T19:39:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,116
|
py
|
misc.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import scipy.io.arff as arff
import bisect
import os, sys
import json
from tensorpack.dataflow import RNGDataFlow, BatchData, PrefetchData, imgaug
from tensorpack.callbacks import Inferencer
def preprocess_data_flow(ds, options, is_train, do_multiprocess=False):
ds_size = ds.size()
while options.batch_size > ds_size:
options.batch_size //= 2
ds = BatchData(ds, max(1, options.batch_size // options.nr_gpu),
remainder=not is_train)
if do_multiprocess:
ds = PrefetchData(ds, 5, 5)
return ds
class Cutout(imgaug.ImageAugmentor):
def __init__(self, length=8, n_holes=1):
self._init(locals())
def _augment(self, img, _):
h, w = img.shape[:2]
mask = np.ones((h, w), np.float32)
for _ in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
if len(img.shape) == 3:
mask = np.reshape(mask, [h, w, 1])
img = img * mask
return img
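# Hedged usage sketch (the image values are assumptions for illustration):
# applying Cutout to a single HxWxC image, assuming the usual tensorpack
# ImageAugmentor interface with its public augment() entry point:
#
#   img = np.full((32, 32, 3), 255, dtype=np.uint8)
#   out = Cutout(length=8, n_holes=1).augment(img)  # one 8x8 patch zeroed out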
class AgeOnlyCallBack(Inferencer):
def __init__(self, n_layers, save_fn=None):
self.names = [ 'input', 'label', 'layer{:03d}.0.pred/preds:0'.format(n_layers - 1)]
self.save_fn = save_fn
def _before_inference(self):
if self.save_fn:
self.fout = open(self.save_fn, 'wt')
else:
self.fout = sys.stdout
def _get_fetches(self):
return self.names
def _on_fetches(self, output):
#self.fout.write("{}\t{}\t{}\n".format(output[0], output[1], output[2]))
self.fout.write("{}\n".format(' '.join(map(lambda z: str(z[1]), output[2]))))
self.fout.flush()
#print('{} {} {}\n'.format(output[0], output[1], output[2]))
def _after_inference(self):
if self.save_fn:
self.fout.close()
class LoadedDataFlow(RNGDataFlow):
def __init__(self, xs, ys, shuffle=True, split='train', do_validation=False):
assert len(xs) == len(ys)
self.shuffle = shuffle
self.dps = [xs, ys]
n_samples = len(xs)
self.split = split
if self.split == 'all':
self._offset = 0
self._size = n_samples
elif self.split == 'train':
self._offset = 0
if do_validation:
self._size = n_samples * 8 // 10
else:
self._size = n_samples * 9 // 10
elif self.split == 'val' or self.split == 'validation':
if do_validation:
self._offset = n_samples * 8 // 10
self._size = n_samples * 9 // 10 - self._offset
else:
self._offset = n_samples * 9 // 10
self._size = n_samples - self._offset
def size(self):
return self._size
def get_data(self):
idxs = list(range(self._offset, self._offset + self._size))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
#print('{} : {} '.format(k, [dp[k] for dp in self.dps]))
yield [dp[k] for dp in self.dps]
def get_csv_data(fn, delim=' ', target_dim=-1, num_classes=2):
if num_classes > 0:
parse_y = lambda y : int(float(y))
y_type = int
else:
parse_y = lambda y : float(y)
y_type = np.float32
with open(fn, 'rt') as fin:
xs, ys = [], []
for line in fin:
line = line.strip().split(delim)
if target_dim < 0:
target_dim = len(line) + target_dim
xs.append(list(map(float, line[0:target_dim] + line[target_dim+1:len(line)])))
ys.append(parse_y(line[target_dim]))
xs = np.asarray(xs, dtype=np.float32).reshape([len(xs), -1])
ys = np.asarray(ys, dtype=y_type)
return xs, ys, len(xs[0]), num_classes
|
1f230d7edf05fca8047a87b3298d5200e7752e67
|
abb035c51b15464d2caa454ecc439b883a726c59
|
/utils.py
|
1693bfeeed7195c82edb09f8ac9a28aabf6c67f6
|
[
"Apache-2.0"
] |
permissive
|
allenai/tpu_pretrain
|
03c2532882a48ae5bfd533b423cb90ba778b5918
|
10e8f262b9a28d3bef51a103ce6f7003e8bc19a9
|
refs/heads/master
| 2023-07-07T12:39:20.609724
| 2019-10-24T00:41:57
| 2019-10-24T00:41:57
| 207,008,877
| 136
| 13
|
Apache-2.0
| 2019-10-24T00:41:58
| 2019-09-07T18:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 12,675
|
py
|
utils.py
|
from argparse import ArgumentParser
import os
from pathlib import Path
import shutil
import glob
import logging
import json
import random
import numpy as np
from tempfile import TemporaryDirectory
from collections import namedtuple
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from pytorch_transformers.modeling_utils import WEIGHTS_NAME
def init(args):
# init logger
log_format = '%(asctime)-10s: %(message)s'
if args.log_file is not None and args.log_file != "":
Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format=log_format)
logging.warning(f'This will get logged to file: {args.log_file}')
else:
logging.basicConfig(level=logging.INFO, format=log_format)
# create output dir
if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
assert 'bert' in args.output_dir.name, \
'''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''
args.output_dir.mkdir(parents=True, exist_ok=True)
# set random seeds
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def get_args_parser_with_general_args():
parser = ArgumentParser()
parser.add_argument('--pregenerated_data', type=Path, required=True)
parser.add_argument('--output_dir', type=Path, required=True)
parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model. Either a path to the model dir or "
"selected from list: bert-base-uncased, bert-large-uncased, bert-base-cased, "
"bert-base-multilingual, bert-base-chinese, roberta-base, roberta-large")
parser.add_argument("--reduce_memory", action="store_true",
help="Store training data as on-disc memmaps to massively reduce memory usage")
parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="Number of gradient accumulation steps")
parser.add_argument("--betas",
nargs=2,
type=float,
default=[0.9, 0.98],
help="tuple specifying AdamW beta weights")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--warmup_proportion",
type=float,
required=False,
help="Linear warmup over warmup_steps.")
parser.add_argument("--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--log-file', default=None, type=str)
parser.add_argument('--track_learning_rate',
action='store_true',
help="if true, will track learning rate in progress bar.")
return parser
def save_checkpoint(model, epoch, output_dir):
weights_name, ext = os.path.splitext(WEIGHTS_NAME)
save_comment=f'{epoch:04d}'
weights_name += f'-{save_comment}{ext}'
output_model_file = os.path.join(output_dir, weights_name)
logging.info(f"Saving fine-tuned model to: {output_model_file}")
state_dict = model.state_dict()
for t_name in state_dict:
t_val = state_dict[t_name]
state_dict[t_name] = t_val.to('cpu')
torch.save(state_dict, output_model_file)
def prepare_last_checkpoint(pretrained_model_name_or_path):
if not os.path.isdir(pretrained_model_name_or_path):
        return 0  # It is probably a model name, not an input directory
weights_name, ext = os.path.splitext(WEIGHTS_NAME)
archive_files = sorted(glob.glob(f'{pretrained_model_name_or_path}/{weights_name}*{ext}'))
if len(archive_files) > 1 and archive_files[-1].endswith(WEIGHTS_NAME):
archive_file = archive_files[-2] # if the last file is `pytorch_model.bin`, ignore it and use the one before
else:
archive_file = archive_files[-1]
logging.info(f'Found {len(archive_files)} model files. Use the most recent, {archive_file}')
# extract epoch number (some/dir/pytorch_model-epochNumber.bin or some/dir/pytorch_model.bin
filename = archive_file.split('/')[-1]
assert filename.startswith(weights_name)
filename_without_ext = filename.split('.')[0]
splits = filename_without_ext.split('-')
if len(splits) == 1:
start_epoch = 0 # filename is `pytorch_model.bin`, do nothing
elif len(splits) == 2:
# filename is `pytorch_model-epochNumber.bin`
assert splits[0] == weights_name
# read epoch number to continue training from the last point
start_epoch = int(splits[1]) + 1
# copy `pytorch_model-epochNumber.bin` to `pytorch_model.bin`
# because that's what the `from_pretrained` is loading from
dest_filename = archive_file.replace(filename, WEIGHTS_NAME)
logging.info(f'For loading, copy {archive_file} to {dest_filename}')
shutil.copy(archive_file, dest_filename)
else:
assert False # wrong name
return start_epoch
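# Worked example of the naming scheme handled above (paths are hypothetical):
# given a model directory containing
#     pytorch_model-0002.bin
#     pytorch_model-0003.bin   <- most recent checkpoint
# the function copies pytorch_model-0003.bin to pytorch_model.bin (so that
# from_pretrained picks it up) and returns start_epoch = 4.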
def get_dataset_stats(args, n_tpu):
samples_per_epoch = []
for i in range(args.epochs):
epoch_file = args.pregenerated_data / f"epoch_{i}.json"
metrics_file = args.pregenerated_data / f"epoch_{i}_metrics.json"
if epoch_file.is_file() and metrics_file.is_file():
metrics = json.loads(metrics_file.read_text())
samples_per_epoch.append(metrics['num_training_examples'])
else:
if i == 0:
exit("No training data was found!")
print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
print("This script will loop over the available data, but training diversity may be negatively impacted.")
num_data_epochs = i
break
else:
num_data_epochs = args.epochs
total_train_examples = 0
for i in range(args.start_epoch, args.epochs):
# The modulo takes into account the fact that we may loop over limited epochs of data
total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]
num_train_optimization_steps = compute_num_steps_in_epoch(total_train_examples,
args.train_batch_size,
args.gradient_accumulation_steps,
n_tpu)
return num_data_epochs, num_train_optimization_steps
def compute_num_steps_in_epoch(num_samples: int, batch_size: int, grad_accum_steps: int, n_tpu: int):
return int(num_samples / batch_size / grad_accum_steps / n_tpu)
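# Worked example (the numbers are illustrative): 100,000 samples with
# train_batch_size=32, gradient_accumulation_steps=2 and n_tpu=8 gives
# int(100000 / 32 / 2 / 8) = 195 optimizer steps.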
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
def convert_example_to_features(example, tokenizer, max_seq_length):
tokens = example["tokens"]
segment_ids = example["segment_ids"]
is_random_next = example["is_random_next"]
masked_lm_positions = example["masked_lm_positions"]
masked_lm_labels = example["masked_lm_labels"]
assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated
input_ids = tokenizer.convert_tokens_to_ids(tokens)
masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
    input_array = np.zeros(max_seq_length, dtype=np.int64)
    input_array[:len(input_ids)] = input_ids
    mask_array = np.zeros(max_seq_length, dtype=bool)
    mask_array[:len(input_ids)] = 1
    segment_array = np.zeros(max_seq_length, dtype=bool)
    segment_array[:len(segment_ids)] = segment_ids
    lm_label_array = np.full(max_seq_length, dtype=np.int64, fill_value=-1)
lm_label_array[masked_lm_positions] = masked_label_ids
features = InputFeatures(input_ids=input_array,
input_mask=mask_array,
segment_ids=segment_array,
lm_label_ids=lm_label_array,
is_next=is_random_next)
return features
class PregeneratedDataset(Dataset):
def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False):
self.tokenizer = tokenizer
self.epoch = epoch
self.data_epoch = epoch % num_data_epochs
data_file = training_path / f"epoch_{self.data_epoch}.json"
metrics_file = training_path / f"epoch_{self.data_epoch}_metrics.json"
assert data_file.is_file() and metrics_file.is_file()
metrics = json.loads(metrics_file.read_text())
num_samples = metrics['num_training_examples']
seq_len = metrics['max_seq_len']
self.temp_dir = None
self.working_dir = None
if reduce_memory:
self.temp_dir = TemporaryDirectory()
self.working_dir = Path(self.temp_dir.name)
input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
            input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=bool)
            segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=bool)
            lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
                                     shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
            lm_label_ids[:] = -1
            is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',
                                 shape=(num_samples,), mode='w+', dtype=bool)
        else:
            input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
            input_masks = np.zeros(shape=(num_samples, seq_len), dtype=bool)
            segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=bool)
            lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
            is_nexts = np.zeros(shape=(num_samples,), dtype=bool)
logging.info(f"Loading training examples for epoch {epoch} from {data_file}")
with data_file.open() as f:
for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
line = line.strip()
example = json.loads(line)
features = convert_example_to_features(example, tokenizer, seq_len)
input_ids[i] = features.input_ids
segment_ids[i] = features.segment_ids
input_masks[i] = features.input_mask
lm_label_ids[i] = features.lm_label_ids
is_nexts[i] = features.is_next
assert i == num_samples - 1 # Assert that the sample count metric was true
logging.info("Loading complete!")
self.num_samples = num_samples
self.seq_len = seq_len
self.input_ids = input_ids
self.input_masks = input_masks
self.segment_ids = segment_ids
self.lm_label_ids = lm_label_ids
self.is_nexts = is_nexts
def __len__(self):
return self.num_samples
def __getitem__(self, item):
return (torch.tensor(self.input_ids[item].astype(np.int64)),
torch.tensor(self.input_masks[item].astype(np.int64)),
torch.tensor(self.segment_ids[item].astype(np.int64)),
torch.tensor(self.lm_label_ids[item].astype(np.int64)),
torch.tensor(self.is_nexts[item].astype(np.int64)))
|
ba8205e9cb9a6efc3ece9b373d132ef0947499bc
|
f314fb70c20eee3baf5dc7b7258964e4ff6a397b
|
/vidaug/augmentors/group.py
|
560ed2b9d4064295199ddf1034fe945931a4af61
|
[
"MIT"
] |
permissive
|
okankop/vidaug
|
14d11942c8d6cbc0ea27430885585435529e1481
|
1c1ddf2640fe4a9171267d64ae5e3bd70c24d54a
|
refs/heads/master
| 2021-10-11T11:50:49.620375
| 2021-09-27T09:05:27
| 2021-09-27T09:05:27
| 136,959,442
| 357
| 81
|
MIT
| 2021-10-05T18:45:16
| 2018-06-11T17:32:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,704
|
py
|
group.py
|
"""
Augmenters that apply to a group of augmentations, like selecting
an augmentation from a list, or applying all the augmentations in
a list sequentially
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.HorizontalFlip(),
va.VerticalFlip() ])
List of augmenters:
* Sequential
* OneOf
* SomeOf
* Sometimes
"""
import numpy as np
import PIL
import random
class Sequential(object):
"""
Composes several augmentations together.
Args:
transforms (list of "Augmentor" objects): The list of augmentations to compose.
random_order (bool): Whether to apply the augmentations in random order.
"""
def __init__(self, transforms, random_order=False):
self.transforms = transforms
self.rand = random_order
def __call__(self, clip):
if self.rand:
rand_transforms = self.transforms[:]
random.shuffle(rand_transforms)
for t in rand_transforms:
clip = t(clip)
else:
for t in self.transforms:
clip = t(clip)
return clip
class OneOf(object):
"""
Selects one augmentation from a list.
Args:
transforms (list of "Augmentor" objects): The list of augmentations to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
select = random.choice(self.transforms)
clip = select(clip)
return clip
class SomeOf(object):
"""
Selects a given number of augmentation from a list.
Args:
transforms (list of "Augmentor" objects): The list of augmentations.
N (int): The number of augmentations to select from the list.
random_order (bool): Whether to apply the augmentations in random order.
"""
def __init__(self, transforms, N, random_order=True):
self.transforms = transforms
self.rand = random_order
if N > len(transforms):
            raise TypeError('The number of augmentations to apply (N) must not exceed the number of given transforms')
else:
self.N = N
def __call__(self, clip):
if self.rand:
tmp = self.transforms[:]
selected_trans = [tmp.pop(random.randrange(len(tmp))) for _ in range(self.N)]
for t in selected_trans:
clip = t(clip)
return clip
else:
indices = [i for i in range(len(self.transforms))]
selected_indices = [indices.pop(random.randrange(len(indices)))
for _ in range(self.N)]
selected_indices.sort()
selected_trans = [self.transforms[i] for i in selected_indices]
for t in selected_trans:
clip = t(clip)
return clip
class Sometimes(object):
"""
Applies an augmentation with a given probability.
Args:
p (float): The probability to apply the augmentation.
transform (an "Augmentor" object): The augmentation to apply.
    Example: Use this transform as follows:
sometimes = lambda aug: va.Sometimes(0.5, aug)
sometimes(va.HorizontalFlip)
"""
def __init__(self, p, transform):
self.transform = transform
        if p > 1.0 or p < 0.0:
            raise TypeError('Expected p to be in [0.0, 1.0], ' +
                            'but got p = {0}'.format(p))
else:
self.p = p
def __call__(self, clip):
if random.random() < self.p:
clip = self.transform(clip)
return clip
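# A hedged composition sketch (HorizontalFlip and VerticalFlip are assumed to
# exist elsewhere in the package, as in the module docstring above):
#
#   seq = Sequential([
#       Sometimes(0.5, HorizontalFlip()),               # flip half of the clips
#       SomeOf([HorizontalFlip(), VerticalFlip()], N=1),
#   ])
#   augmented_clip = seq(clip)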
|
ecb0b24546e423dfae1b9f49a7fa4c2931508e3e
|
1e1f444c97bfef8c6831b820822cc94c89b8c20a
|
/mlfinlab/networks/graph.py
|
b2d247c8d9afbae846400f2e96bad33c7aed73e8
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hudson-and-thames/mlfinlab
|
4fa7be033a7be9725a577fd25e1d590648eb344b
|
79dcc7120ec84110578f75b025a75850eb72fc73
|
refs/heads/master
| 2023-03-28T11:58:55.171692
| 2021-12-01T08:04:50
| 2021-12-01T08:04:50
| 170,544,934
| 3,763
| 1,024
|
NOASSERTION
| 2023-03-25T00:52:55
| 2019-02-13T16:57:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
graph.py
|
"""
This file contains Graph classes, which create NetworkX's Graph objects from matrices.
"""
from abc import ABC
import networkx as nx
from matplotlib import pyplot as plt
class Graph(ABC):
"""
This Graph class is a parent class for different types of graphs such as a MST.
"""
def __init__(self, matrix_type):
"""
Initializes the Graph object and the Graph class attributes.
This includes the specific graph such as a MST stored as an attribute.
:param matrix_type: (str) Name of the matrix type (e.g. "distance" or "correlation").
"""
pass
def get_matrix_type(self):
"""
Returns the matrix type set at initialisation.
:return: (str) String of matrix type (eg. "correlation" or "distance").
"""
pass
def get_graph(self):
"""
Returns the Graph stored as an attribute.
:return: (nx.Graph) Returns a NetworkX graph object.
"""
pass
def get_difference(self, input_graph_two):
"""
        Given two Graphs with the same nodes, return a list of differences in edge connections.
:param input_graph_two: (Graph) A graph to compare self.graph against.
:return: (List) A list of unique tuples showing different edge connections.
"""
pass
def get_pos(self):
"""
Returns the dictionary of the nodes coordinates.
:return: (Dict) Dictionary of node coordinates.
"""
pass
def get_graph_plot(self):
"""
Returns the graph of the MST with labels.
Assumes that the matrix contains stock names as headers.
:return: (AxesSubplot) Axes with graph plot. Call plt.show() to display this graph.
"""
pass
def set_node_groups(self, industry_groups):
"""
Sets the node industry group, by taking in a dictionary of industry group to a list of node indexes.
:param industry_groups: (Dict) Dictionary of the industry name to a list of node indexes.
"""
pass
def set_node_size(self, market_caps):
"""
Sets the node sizes, given a list of market cap values corresponding to node indexes.
:param market_caps: (List) List of numbers corresponding to node indexes.
"""
pass
def get_node_sizes(self):
"""
Returns the node sizes as a list.
:return: (List) List of numbers representing node sizes.
"""
pass
def get_node_colours(self):
"""
Returns a map of industry group matched with list of nodes.
:return: (Dict) Dictionary of industry name to list of node indexes.
"""
pass
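# All method bodies above are stubbed out in this copy of the file. As a
# hedged illustration (assumed, not the library's actual code), a concrete
# subclass could be built on NetworkX like this:
#
#     class MST(Graph):
#         def __init__(self, matrix, matrix_type):
#             # matrix: pd.DataFrame adjacency with tickers as headers
#             super().__init__(matrix_type)
#             full_graph = nx.from_pandas_adjacency(matrix)
#             self.graph = nx.minimum_spanning_tree(full_graph)
#             self.pos = nx.spring_layout(self.graph)
#
#         def get_graph(self):
#             return self.graph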
|
7908b622d4f80e5d98b4db86b3abaff6ab867a61
|
98c372a42cb7f04ad52e6630c0bb3d1eecd30e16
|
/testing/test_python.py
|
bad2904a81687e53411e32535093f051d218481b
|
[
"Apache-2.0"
] |
permissive
|
zippy84/vtkbool
|
73b9e6bd78ad1798e6aebf178025f5fe46500c7f
|
dca2f7cc9a242f5c33482ca7bbf6afa3bd4c0c7c
|
refs/heads/master
| 2023-09-05T02:58:37.643374
| 2023-08-31T20:45:22
| 2023-08-31T20:45:22
| 14,350,407
| 129
| 38
|
Apache-2.0
| 2022-08-21T20:26:51
| 2013-11-13T01:25:57
|
C++
|
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
test_python.py
|
#!/usr/bin/env python
# *-* coding: UTF-8 *-*
# Copyright 2012-2023 Ronald Römer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.extend(['/home/zippy/vtkbool/build/lib/python3.10/site-packages/vtkbool'])
from vtkmodules.vtkFiltersSources import vtkCubeSource
from vtkmodules.vtkIOLegacy import vtkPolyDataWriter
from vtkBool import vtkPolyDataBooleanFilter
cubeA = vtkCubeSource()
cubeB = vtkCubeSource()
bf = vtkPolyDataBooleanFilter()
bf.SetInputConnection(0, cubeA.GetOutputPort())
bf.SetInputConnection(1, cubeB.GetOutputPort())
bf.Update()
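# vtkPolyDataWriter is imported above but never used; as an illustrative
# addition (not part of the original script), the boolean result can be
# written out for inspection:
writer = vtkPolyDataWriter()
writer.SetFileName('result.vtk')
writer.SetInputConnection(bf.GetOutputPort())
writer.Write()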
|
3cbca638e76cf2391859bb1a6b4b24da6c46f664
|
0744dcc5394cebf57ebcba343747af6871b67017
|
/external/iotivity/iotivity_1.2-rel/resource/csdk/security/unittest/SConscript
|
3c3339eef416c86db9c14b623dd8594ba79bd5d8
|
[
"Apache-2.0",
"GPL-2.0-only",
"MIT",
"BSD-3-Clause"
] |
permissive
|
Samsung/TizenRT
|
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
|
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
|
refs/heads/master
| 2023-08-31T08:59:33.327998
| 2023-08-08T06:09:20
| 2023-08-31T04:38:20
| 82,517,252
| 590
| 719
|
Apache-2.0
| 2023-09-14T06:54:49
| 2017-02-20T04:38:30
|
C
|
UTF-8
|
Python
| false
| false
| 6,200
|
SConscript
|
# //******************************************************************
# //
# // Copyright 2015 Intel Mobile Communications GmbH All Rights Reserved.
# //
# //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# //
# // Licensed under the Apache License, Version 2.0 (the "License");
# // you may not use this file except in compliance with the License.
# // You may obtain a copy of the License at
# //
# // http://www.apache.org/licenses/LICENSE-2.0
# //
# // Unless required by applicable law or agreed to in writing, software
# // distributed under the License is distributed on an "AS IS" BASIS,
# // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# // See the License for the specific language governing permissions and
# // limitations under the License.
# //
# //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
import os
import os.path
# SConscript file for Local PKI google tests
gtest_env = SConscript('#extlibs/gtest/SConscript')
srmtest_env = gtest_env.Clone()
src_dir = srmtest_env.get('SRC_DIR')
target_os = srmtest_env.get('TARGET_OS')
######################################################################
# Build flags
######################################################################
with_upstream_libcoap = srmtest_env.get('WITH_UPSTREAM_LIBCOAP')
if with_upstream_libcoap == '1':
# For bring up purposes only, we manually copy the forked version to where the unforked version is downloaded.
srmtest_env.AppendUnique(CPPPATH = ['#extlibs/libcoap/libcoap/include'])
else:
# For bring up purposes only, the forked version will live here.
srmtest_env.AppendUnique(CPPPATH = ['../../connectivity/lib/libcoap-4.1.1/include'])
srmtest_env.PrependUnique(CPPPATH = [
'../../../c_common/oic_malloc/include',
'../../connectivity/inc',
'../../connectivity/inc/pkix',
'../../connectivity/api',
'../../connectivity/external/inc',
'../include/internal',
'../../logger/include',
'../../stack/include',
'../../stack/include/internal',
'../../../oc_logger/include',
'../../../../extlibs/cjson/',
'../provisioning/include',
'../include'
])
srmtest_env.AppendUnique(LIBPATH = [srmtest_env.get('BUILD_DIR')])
srmtest_env.PrependUnique(LIBS = ['ocsrm',
'oc_logger',
'connectivity_abstraction',
'coap'])
if srmtest_env.get('SECURED') == '1':
srmtest_env.AppendUnique(LIBS = ['mbedtls','mbedx509','mbedcrypto'])
if srmtest_env.get('LOGGING') == '1':
srmtest_env.AppendUnique(CPPDEFINES = ['TB_LOG'])
if srmtest_env.get('MULTIPLE_OWNER') == '1':
srmtest_env.AppendUnique(CPPDEFINES=['MULTIPLE_OWNER'])
if target_os == 'windows':
srmtest_env.AppendUnique(LINKFLAGS = ['/subsystem:CONSOLE'])
srmtest_env.AppendUnique(LIBS = ['advapi32', 'bcrypt', 'kernel32', 'ws2_32', 'iphlpapi', 'octbstack_static'])
else:
# TODO: Implement feature check.
srmtest_env.AppendUnique(CPPDEFINES = ['HAVE_LOCALTIME_R'])
srmtest_env.AppendUnique(LIBS = ['octbstack'])
######################################################################
# Source files and Targets
######################################################################
unittest = srmtest_env.Program('unittest', ['aclresourcetest.cpp',
'amaclresourcetest.cpp',
'pstatresource.cpp',
'doxmresource.cpp',
'policyengine.cpp',
'securityresourcemanager.cpp',
'credentialresource.cpp',
'srmutility.cpp',
'iotvticalendartest.cpp',
'base64tests.cpp',
'svcresourcetest.cpp',
'srmtestcommon.cpp',
'directpairingtest.cpp',
'crlresourcetest.cpp'])
Alias("test", [unittest])
unittest_src_dir = os.path.join(src_dir, 'resource', 'csdk', 'security', 'unittest') + os.sep
unittest_build_dir = os.path.join(srmtest_env.get('BUILD_DIR'), 'resource', 'csdk', 'security', 'unittest') + os.sep
srmtest_env.AppendUnique(CPPDEFINES = ['SECURITY_BUILD_UNITTEST_DIR='+unittest_build_dir.encode('string_escape')])
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest.json'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest_acl1.json'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest_default_acl.json'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_svr_db.json'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest.dat'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest_acl1.dat'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_unittest_default_acl.dat'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_svr_db_prov.dat'))
srmtest_env.Alias("install", srmtest_env.Install( unittest_build_dir,
unittest_src_dir + 'oic_svr_db.dat'))
srmtest_env.AppendTarget('test')
if srmtest_env.get('TEST') == '1':
if target_os in ['linux', 'windows']:
out_dir = srmtest_env.get('BUILD_DIR')
result_dir = os.path.join(srmtest_env.get('BUILD_DIR'), 'test_out') + os.sep
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
srmtest_env.AppendENVPath('GTEST_OUTPUT', ['xml:'+ result_dir])
srmtest_env.AppendENVPath('LD_LIBRARY_PATH', [out_dir])
srmtest_env.AppendENVPath('PATH', [os.path.join(out_dir, 'resource', 'csdk')])
from tools.scons.RunTest import *
run_test(srmtest_env,'ut','resource/csdk/security/unittest/unittest')
|
f865ba70bf49a81592ed92375186c24baeffb443
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/telemetry/migrations/0001_initial.py
|
6171a9529b80dbedab536e69692b403fcc2251fb
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
0001_initial.py
|
# Generated by Django 3.2.13 on 2022-04-26 18:56
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="BuildData",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("data", models.JSONField()),
],
options={
"verbose_name_plural": "Build data",
},
),
]
|
d12b47d47318c55d50a40b9a5530e2b25099b786
|
863bfa36852b84e2e2834abb67171e37d48b9b81
|
/returns/contrib/mypy/_structures/types.py
|
df9a004fdf1ba4b70850adf69b6f188f2328c718
|
[
"BSD-2-Clause"
] |
permissive
|
dry-python/returns
|
33f763a34439cb6aa5419f16c6f45f27610d82d4
|
ae8d9ffaf20c459296337b78ba5ecb2f98870f61
|
refs/heads/master
| 2023-09-04T07:33:39.768675
| 2023-09-01T20:45:59
| 2023-09-01T20:45:59
| 167,689,891
| 2,967
| 130
|
BSD-2-Clause
| 2023-09-11T15:18:58
| 2019-01-26T13:08:38
|
Python
|
UTF-8
|
Python
| false
| false
| 214
|
py
|
types.py
|
from typing import Union
from mypy.plugin import FunctionContext, MethodContext
#: We treat them equally when working with functions or methods.
CallableContext = Union[
FunctionContext,
MethodContext,
]
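# Illustrative helper (not part of the original module): both context types
# expose ``default_return_type``, which is what makes this union convenient.
#
#     from mypy.types import Type as MypyType
#
#     def default_return(ctx: CallableContext) -> MypyType:
#         """Return the type mypy inferred before the plugin intervened."""
#         return ctx.default_return_type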
|
371ee716165321bc2506620e672541e8e7634a18
|
10dde921afb622b58054260ccddb61ca1c1fb3a7
|
/mask2polygons.py
|
3741623674c6e4ddd7b74f14f54948e26e5c01b4
|
[] |
no_license
|
ryouchinsa/Rectlabel-support
|
db8d4e995405f28a66dc717cc37c1a513a05cbce
|
6a328c93deaf9725ecfd322d70559ef0295aa084
|
refs/heads/master
| 2023-09-04T17:41:07.799819
| 2023-09-02T10:39:54
| 2023-09-02T10:39:54
| 86,496,548
| 486
| 76
| null | 2023-05-04T05:08:25
| 2017-03-28T19:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
mask2polygons.py
|
import cv2
import numpy as np
mask_path = 'grayscale_mask.png'
image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
h, w = image.shape
line_width = int((h + w) * 0.5 * 0.0025)
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
contours_approx = []
polygons = []
for contour in contours:
epsilon = 0.001 * cv2.arcLength(contour, True)
contour_approx = cv2.approxPolyDP(contour, epsilon, True)
contours_approx.append(contour_approx)
polygon = contour_approx.flatten().tolist()
polygons.append(polygon)
cv2.drawContours(image, contours_approx, -1, 128, line_width)
cv2.imwrite('polygons.png', image)
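# Illustrative extension (not in the original script): persist the extracted
# polygons as JSON so downstream annotation tools can consume them.
import json

with open('polygons.json', 'w') as f:
    json.dump({'mask': mask_path, 'polygons': polygons}, f)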
|
4bf89c7dde8de8e2cd59533d924fc5e6db0aab80
|
e4fb3fba8bab17ab18587ce1dfa8414fa4074cdb
|
/partition/visualize.py
|
56b7c6743bd83a7fb864094284db73557d73ad68
|
[
"MIT"
] |
permissive
|
loicland/superpoint_graph
|
787f60a367cafdbd79b6b35d11f464e1b3de4488
|
0209777339327c9b327b6947af6c89b20bb45981
|
refs/heads/ssp+spg
| 2023-08-03T17:14:45.417753
| 2023-07-19T20:50:48
| 2023-07-19T20:50:48
| 111,715,635
| 747
| 241
|
MIT
| 2023-07-19T20:50:49
| 2017-11-22T17:52:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
visualize.py
|
"""
Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs
http://arxiv.org/abs/1711.09869
2017 Loic Landrieu, Martin Simonovsky
this function outputs various ply files to visualize the different steps
"""
import os.path
import numpy as np
import argparse
import sys
sys.path.append("./partition/")
from plyfile import PlyData, PlyElement
from provider import *
parser = argparse.ArgumentParser(description='Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs')
parser.add_argument('--dataset', default='s3dis', help='dataset name: sema3d|s3dis')
parser.add_argument('--ROOT_PATH', default='/mnt/bigdrive/loic/S3DIS', help='folder containing the ./data folder')
parser.add_argument('--res_file', default='../models/cv1/predictions_val', help='folder containing the results')
parser.add_argument('--supervized_partition',type=int, default=0)
parser.add_argument('--file_path', default='Area_1/conferenceRoom_1', help='file to output (must include the area / set in its path)')
parser.add_argument('--upsample', default=0, type=int, help='if 1, upsample the prediction to the original cloud (if the file is huge this can take a very long time and use a lot of memory - avoid on sema3d)')
parser.add_argument('--ver_batch', default=0, type=int, help='Batch size for reading large files')
parser.add_argument('--output_type', default='igfpres', help='which cloud to output: i = input rgb pointcloud \
, g = ground truth, f = geometric features, p = partition, r = prediction result \
, e = error, s = SPG')
args = parser.parse_args()
#---path to data---------------------------------------------------------------
#root of the data directory
root = args.ROOT_PATH+'/'
rgb_out = 'i' in args.output_type
gt_out = 'g' in args.output_type
fea_out = 'f' in args.output_type
par_out = 'p' in args.output_type
res_out = 'r' in args.output_type
err_out = 'e' in args.output_type
spg_out = 's' in args.output_type
folder = os.path.split(args.file_path)[0] + '/'
file_name = os.path.split(args.file_path)[1]
if args.dataset == 's3dis':
n_labels = 13
if args.dataset == 'sema3d':
n_labels = 8
if args.dataset == 'vkitti':
n_labels = 13
if args.dataset == 'custom_dataset':
n_labels = 10
#---load the values------------------------------------------------------------
fea_file = root + "features/" + folder + file_name + '.h5'
if not os.path.isfile(fea_file) or args.supervized_partition:
fea_file = root + "features_supervision/" + folder + file_name + '.h5'
spg_file = root + "superpoint_graphs/" + folder + file_name + '.h5'
ply_folder = root + "clouds/" + folder
ply_file = ply_folder + file_name
res_file = args.res_file + '.h5'
if not os.path.isdir(root + "clouds/"):
os.mkdir(root + "clouds/" )
if not os.path.isdir(ply_folder ):
os.mkdir(ply_folder)
if (not os.path.isfile(fea_file)) :
raise ValueError("%s does not exist and is needed" % fea_file)
geof, xyz, rgb, graph_nn, labels = read_features(fea_file)
if (par_out or res_out) and (not os.path.isfile(spg_file)):
raise ValueError("%s does not exist and is needed to output the partition or result ply" % spg_file)
else:
graph_spg, components, in_component = read_spg(spg_file)
if res_out or err_out:
if not os.path.isfile(res_file):
raise ValueError("%s does not exist and is needed to output the result ply" % res_file)
try:
pred_red = np.array(h5py.File(res_file, 'r').get(folder + file_name))
if (len(pred_red) != len(components)):
raise ValueError("It looks like the spg is not adapted to the result file")
pred_full = reduced_labels2full(pred_red, components, len(xyz))
except OSError:
raise ValueError("%s does not exist in %s" % (folder + file_name, res_file))
#---write the output clouds----------------------------------------------------
if rgb_out:
print("writing the RGB file...")
write_ply(ply_file + "_rgb.ply", xyz, rgb)
if gt_out:
print("writing the GT file...")
prediction2ply(ply_file + "_GT.ply", xyz, labels, n_labels, args.dataset)
if fea_out:
print("writing the features file...")
geof2ply(ply_file + "_geof.ply", xyz, geof)
if par_out:
print("writing the partition file...")
partition2ply(ply_file + "_partition.ply", xyz, components)
if res_out and not bool(args.upsample):
print("writing the prediction file...")
prediction2ply(ply_file + "_pred.ply", xyz, pred_full+1, n_labels, args.dataset)
if err_out:
print("writing the error file...")
error2ply(ply_file + "_err.ply", xyz, rgb, labels, pred_full+1)
if spg_out:
print("writing the SPG file...")
spg2ply(ply_file + "_spg.ply", graph_spg)
if res_out and bool(args.upsample):
if args.dataset=='s3dis':
data_file = root + 'data/' + folder + file_name + '/' + file_name + ".txt"
xyz_up, rgb_up = read_s3dis_format(data_file, False)
    elif args.dataset=='sema3d':  # really not recommended unless you are very confident in your hardware
        data_folder = root + 'data/' + folder  # `data_folder` was undefined in the original; assumed layout mirroring the s3dis branch
        data_file = data_folder + file_name + ".txt"
        xyz_up, rgb_up = read_semantic3d_format(data_file, 0, '', 0, args.ver_batch)
    elif args.dataset=='custom_dataset':
        data_folder = root + 'data/' + folder  # same assumption as above
        data_file = data_folder + file_name + ".ply"
        xyz_up, rgb_up = read_ply(data_file)
del rgb_up
pred_up = interpolate_labels(xyz_up, xyz, pred_full, args.ver_batch)
print("writing the upsampled prediction file...")
prediction2ply(ply_file + "_pred_up.ply", xyz_up, pred_up+1, n_labels, args.dataset)
|
db1d50f9d980d330c4dc6b2adc80f414d2444af1
|
59a05ac6f169fbc5a98a76707b4c31ea446e5d6a
|
/examples/kinematic_styles.py
|
32dcb73f1aeb79043c9abda5440303602f0826ce
|
[
"MIT"
] |
permissive
|
joferkington/mplstereonet
|
68afd6d3585e19257f70103a2d43eb3a9ceff6b1
|
89a3b5a028703452bfc5dbfc3d77af355f645485
|
refs/heads/master
| 2023-01-06T02:06:45.078929
| 2022-12-29T23:05:50
| 2022-12-29T23:05:50
| 4,018,357
| 143
| 55
|
MIT
| 2022-12-29T23:05:51
| 2012-04-13T17:16:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
kinematic_styles.py
|
"""
Demonstrating more control on the parameters (e.g. friction angle and lateral
limits) of the analysis and plotting style for planar sliding and toppling
failure. Similar for wedge failure.
"""
import matplotlib.pyplot as plt
import mplstereonet
import mplstereonet.kinematic_analysis as kinematic
# Set up the analysis with friction angle and lateral limits
P2 = kinematic.PlanarSliding(strike=0, dip=80, fric_angle=40, latlim=30)
T2 = kinematic.FlexuralToppling(strike=0, dip=80, fric_angle=40, latlim=30)
# Start plotting
fig, (ax1, ax2) = mplstereonet.subplots(ncols=2, figsize=(12,9))
# Customizing with the kwargs - example with planar sliding failure
P2.plot_kinematic(
daylight_kws = {'ec':'b', 'label':'Daylight Envelope'},
    friction_kws = {'ec':'green', 'label':r'Friction Cone (40$^\circ$)',
                    'ls':'-.'},
    lateral_kws = {'color':'purple', 'label':r'Lateral Limits ($\pm30^\circ$)',
                   'ls':'--'},
main_kws = {'color':'orange'},
secondary_kws = {'color':'cyan'},
slope_kws = {'color':'r', 'label':'Slope Face (80/090)'},
ax=ax1)
ax1.grid(linestyle=':')
ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05))
# Or alternatively, elements in the plot can be customized with the output
# artists - example with flexural toppling failure
T2_artists = T2.plot_kinematic(ax=ax2)
plt.setp(T2_artists['main'], color='orange')
plt.setp(T2_artists['slope'], color='r', label='Slope Face (80/090)')
plt.setp(T2_artists['slip'], color='b',
         label=r'Slip Limit (Friction angle 40$^\circ$)')
plt.setp(T2_artists['lateral'], color='purple', ls='--')
# Set label on one lateral limit artist only to avoid duplicated labels
plt.setp(T2_artists['lateral'][0], label=r'Lateral Limits ($\pm30^\circ$)')
ax2.grid(linestyle=':')
ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05))
plt.show()
|
e86906546d5709bb143c540a3d02b9fb77e10673
|
27b86f422246a78704e0e84983b2630533a47db6
|
/tests/test_05_tools/test_534_dwg_info.py
|
3b9e92c3d0eced9c637c8286aceef1c82a2dbdfa
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 988
|
py
|
test_534_dwg_info.py
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.dwginfo import dwg_info
R12 = "41 43 31 30 30 39"
R2000 = "41 43 31 30 31 35"
R2018 = "41 43 31 30 33 32"
R20XX = "41 43 31 30 33 33"
unknown1 = "32 32 31 30 33 32"
unknown2 = ""
def data(s) -> bytes:
return bytes(int(x, 16) for x in s.split())
@pytest.mark.parametrize(
"s,ver,rel",
[
(R12, "AC1009", "R12"),
(R2000, "AC1015", "R2000"),
(R2018, "AC1032", "R2018"),
(R20XX, "AC1033", "unknown"),
],
ids=["R12", "R2000", "R2018", "unknown"],
)
def test_detect(s, ver, rel):
info = dwg_info(data(s))
assert info.version == ver
assert info.release == rel
@pytest.mark.parametrize(
"s", [unknown1, unknown2],
ids=["invalid", "empty"],
)
def test_detect_invalid(s):
info = dwg_info(data(s))
assert info.version == "invalid"
assert info.release == "invalid"
if __name__ == "__main__":
pytest.main([__file__])
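# Illustrative usage sketch (assumed from the API exercised above): only the
# first six bytes of a .dwg file are needed to detect its version.
#
#     with open("drawing.dwg", "rb") as fp:
#         info = dwg_info(fp.read(6))
#     print(info.version, info.release)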
|
fede20e0b036ccaf8efcd9c0e757fd5781bc174b
|
f324cd2cbebd303fd34cd2e26fe1a51c44202d55
|
/test/acceptance/test_cli.py
|
dbe2cf42a290cd8019fb7705d1c6be5f689eaa1b
|
[
"MIT"
] |
permissive
|
Vimjas/vint
|
d71579154d177daf458ec68423a66055f90fa308
|
e12091830f0ae7311066b9d1417951182fb32eb5
|
refs/heads/master
| 2023-09-02T07:31:31.299270
| 2022-10-24T13:06:33
| 2022-10-24T13:06:33
| 20,857,415
| 191
| 11
|
MIT
| 2022-10-24T13:10:00
| 2014-06-15T14:38:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,707
|
py
|
test_cli.py
|
import unittest
from pathlib import Path
import json
import subprocess
import sys
class TestCLI(unittest.TestCase):
if sys.version_info <= (3,):
def assertRegex(self, string, pattern):
return super(TestCLI, self).assertRegexpMatches(string, pattern)
def assertReturnedStdoutEqual(self, expected_stdout, args):
got_stdout = '(no stdout)'
cmd = [sys.executable, '-m', 'vint'] + args
try:
got_stdout = subprocess.check_output(cmd, universal_newlines=True)
except subprocess.CalledProcessError as err:
print('Got stderr: `{err_message}`'.format(err_message=err))
finally:
print('Got stdout: `{stdout}`'.format(stdout=got_stdout))
self.assertEqual(expected_stdout, got_stdout)
def test_exec_vint_with_valid_file_on_project_root(self):
valid_file = str(Path('test', 'fixture', 'cli', 'valid1.vim'))
expected_output = ''
self.assertReturnedStdoutEqual(expected_output, [valid_file])
def test_exec_vint_with_valid_file_encoded_cp932_on_project_root(self):
valid_file = str(Path('test', 'fixture', 'cli', 'valid-cp932.vim'))
expected_output = ''
self.assertReturnedStdoutEqual(expected_output, [valid_file])
def test_exec_vint_with_invalid_file_on_project_root(self):
invalid_file = str(Path('test', 'fixture', 'cli', 'invalid1.vim'))
cmd = [sys.executable, '-m', 'vint', invalid_file]
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd, universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = '{file_path}:1:13:'.format(file_path=invalid_file)
self.assertRegex(got_output, expected_output_pattern)
def test_exec_vint_with_no_args(self):
cmd = [sys.executable, '-m', 'vint']
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = r'^vint ERROR:'
self.assertRegex(got_output, expected_output_pattern)
def test_exec_vint_with_unexistent_file(self):
cmd = [sys.executable, '-m', 'vint', '/path/to/unexistent']
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = r'^vint ERROR:'
self.assertRegex(got_output, expected_output_pattern)
def test_exec_vint_with_stat_flag(self):
invalid_file = str(Path('test', 'fixture', 'cli', 'invalid1.vim'))
cmd = [sys.executable, '-m', 'vint', '--stat', invalid_file]
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = '{file_path}:1:13:'.format(file_path=invalid_file)
expected_stat_pattern = 'Total'
self.assertRegex(got_output, expected_output_pattern)
self.assertRegex(got_output, expected_stat_pattern)
def test_exec_vint_with_json_flag(self):
invalid_file = str(Path('test', 'fixture', 'cli', 'invalid1.vim'))
cmd = [sys.executable, '-m', 'vint', '--json', invalid_file]
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
            # We should not capture STDERR because the coverage plugin uses it.
subprocess.check_output(cmd,
universal_newlines=True)
got_output = context_manager.exception.output
print(got_output)
self.assertIsInstance(json.loads(got_output), list)
def test_exec_vint_with_verbose_flag(self):
valid_file = str(Path('test', 'fixture', 'cli', 'valid1.vim'))
cmd = [sys.executable, '-m', 'vint', '--verbose', valid_file]
got_output = subprocess.check_output(cmd,
universal_newlines=True,
stderr=subprocess.STDOUT)
expected_output_pattern = r'^vint DEBUG:'
self.assertRegex(got_output, expected_output_pattern)
def test_exec_vint_with_color_flag(self):
invalid_file = str(Path('test', 'fixture', 'cli', 'invalid1.vim'))
cmd = [sys.executable, '-m', 'vint', '--color', invalid_file]
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd, universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = r'\033\['
self.assertRegex(got_output, expected_output_pattern)
def test_exec_vint_with_pipe(self):
cmd = 'echo "foo" =~ "bar" | bin/vint --stdin-display-name STDIN_TEST -'
with self.assertRaises(subprocess.CalledProcessError) as context_manager:
subprocess.check_output(cmd, shell=True, universal_newlines=True)
got_output = context_manager.exception.output
expected_output_pattern = '^STDIN_TEST:'
self.assertRegex(got_output, expected_output_pattern)
if __name__ == '__main__':
unittest.main()
|
3f4924f2b8935823d10794ee379085b65107da42
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/sagemaker/device.py
|
08660dc770390e2d180cb913ee62c081571807d9
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 11,104
|
py
|
device.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DeviceArgs', 'Device']
@pulumi.input_type
class DeviceArgs:
def __init__(__self__, *,
device: pulumi.Input['DeviceDeviceArgs'],
device_fleet_name: pulumi.Input[str]):
"""
The set of arguments for constructing a Device resource.
:param pulumi.Input['DeviceDeviceArgs'] device: The device to register with SageMaker Edge Manager. See Device details below.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet.
"""
pulumi.set(__self__, "device", device)
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
@property
@pulumi.getter
def device(self) -> pulumi.Input['DeviceDeviceArgs']:
"""
The device to register with SageMaker Edge Manager. See Device details below.
"""
return pulumi.get(self, "device")
@device.setter
def device(self, value: pulumi.Input['DeviceDeviceArgs']):
pulumi.set(self, "device", value)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Input[str]:
"""
The name of the Device Fleet.
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_fleet_name", value)
@pulumi.input_type
class _DeviceState:
def __init__(__self__, *,
agent_version: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
device: Optional[pulumi.Input['DeviceDeviceArgs']] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Device resources.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device.
:param pulumi.Input['DeviceDeviceArgs'] device: The device to register with SageMaker Edge Manager. See Device details below.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet.
"""
if agent_version is not None:
pulumi.set(__self__, "agent_version", agent_version)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if device is not None:
pulumi.set(__self__, "device", device)
if device_fleet_name is not None:
pulumi.set(__self__, "device_fleet_name", device_fleet_name)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "agent_version")
@agent_version.setter
def agent_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_version", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def device(self) -> Optional[pulumi.Input['DeviceDeviceArgs']]:
"""
The device to register with SageMaker Edge Manager. See Device details below.
"""
return pulumi.get(self, "device")
@device.setter
def device(self, value: Optional[pulumi.Input['DeviceDeviceArgs']]):
pulumi.set(self, "device", value)
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Device Fleet.
"""
return pulumi.get(self, "device_fleet_name")
@device_fleet_name.setter
def device_fleet_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device_fleet_name", value)
class Device(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device: Optional[pulumi.Input[pulumi.InputType['DeviceDeviceArgs']]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a SageMaker Device resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.Device("example",
device_fleet_name=aws_sagemaker_device_fleet["example"]["device_fleet_name"],
device=aws.sagemaker.DeviceDeviceArgs(
device_name="example",
))
```
## Import
SageMaker Devices can be imported using the `device-fleet-name/device-name`, e.g.,
```sh
$ pulumi import aws:sagemaker/device:Device example my-fleet/my-device
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DeviceDeviceArgs']] device: The device to register with SageMaker Edge Manager. See Device details below.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeviceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a SageMaker Device resource.
## Example Usage
### Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.Device("example",
device_fleet_name=aws_sagemaker_device_fleet["example"]["device_fleet_name"],
device=aws.sagemaker.DeviceDeviceArgs(
device_name="example",
))
```
## Import
SageMaker Devices can be imported using the `device-fleet-name/device-name`, e.g.,
```sh
$ pulumi import aws:sagemaker/device:Device example my-fleet/my-device
```
:param str resource_name: The name of the resource.
:param DeviceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeviceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device: Optional[pulumi.Input[pulumi.InputType['DeviceDeviceArgs']]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeviceArgs.__new__(DeviceArgs)
if device is None and not opts.urn:
raise TypeError("Missing required property 'device'")
__props__.__dict__["device"] = device
if device_fleet_name is None and not opts.urn:
raise TypeError("Missing required property 'device_fleet_name'")
__props__.__dict__["device_fleet_name"] = device_fleet_name
__props__.__dict__["agent_version"] = None
__props__.__dict__["arn"] = None
super(Device, __self__).__init__(
'aws:sagemaker/device:Device',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
agent_version: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
device: Optional[pulumi.Input[pulumi.InputType['DeviceDeviceArgs']]] = None,
device_fleet_name: Optional[pulumi.Input[str]] = None) -> 'Device':
"""
Get an existing Device resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Device.
:param pulumi.Input[pulumi.InputType['DeviceDeviceArgs']] device: The device to register with SageMaker Edge Manager. See Device details below.
:param pulumi.Input[str] device_fleet_name: The name of the Device Fleet.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeviceState.__new__(_DeviceState)
__props__.__dict__["agent_version"] = agent_version
__props__.__dict__["arn"] = arn
__props__.__dict__["device"] = device
__props__.__dict__["device_fleet_name"] = device_fleet_name
return Device(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> pulumi.Output[str]:
return pulumi.get(self, "agent_version")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Device.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def device(self) -> pulumi.Output['outputs.DeviceDevice']:
"""
The device to register with SageMaker Edge Manager. See Device details below.
"""
return pulumi.get(self, "device")
@property
@pulumi.getter(name="deviceFleetName")
def device_fleet_name(self) -> pulumi.Output[str]:
"""
The name of the Device Fleet.
"""
return pulumi.get(self, "device_fleet_name")
|
ce340b629000a517bc21617bdcb6fd2702cd8c60
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondCore/CondHDF5ESSource/scripts/condhdf5tohdf5.py
|
470febaa9bdddce527ecaad9d242d40a97c1c14f
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 5,461
|
py
|
condhdf5tohdf5.py
|
#!/usr/bin/env python3
import argparse
import sys
import logging
import copy
import h5py
import numpy as np
from collections import OrderedDict
import zlib
import lzma
from CondCore.CondHDF5ESSource.hdf5Writer import writeH5File
#Global tags hold a list of Tags
# Tags give the
# record name,
# list of data products
# list of IOVs
# list of payloads per IOV
# Payloads give
# a payload name and
# the serialized data for a data product
# the type of data for the data product
#
class IOVSyncValue(object):
def __init__(self, high, low):
self.high = high
self.low = low
class H5Payload(object):
def __init__(self,dataset,name, compressor):
self._dataset = dataset
self._hash = name
self._type = dataset.attrs['type']
self._memsize = dataset.attrs['memsize']
self._compressor = compressor
def name(self):
return self._hash
def actualType(self):
return self._type
def memsize(self):
return self._memsize
def data(self):
ds = self._dataset[()]
if len(ds) == self.memsize():
return ds
#was compressed
return self._compressor.decompress(ds)
class H5DataProduct(object):
def __init__(self, group, name, compressor):
self._type = group.attrs['type']
self._name = name
self._payloadGroup = group['Payloads']
self._compressor = compressor
def name(self):
return self._name
def objtype(self):
return self._type
def payloads(self):
return [H5Payload(self._payloadGroup[p],p.split('/')[-1], self._compressor) for p in self._payloadGroup]
def idToPayloadNames(self):
return { self._payloadGroup[p].id:p.split('/')[-1] for p in self._payloadGroup }
class H5Tag(object):
def __init__(self, file, group, name):
self._file = file
compressor = None
compressorName = self._file.attrs['default_payload_compressor']
if compressorName == 'lzma':
compressor = lzma
if compressorName == 'zlib':
compressor = zlib
self._group = group
self._record = self._group.attrs['record']
self._name = name
recordGroup = file['Records'][self._record]
dataProductsGroup = recordGroup['DataProducts']
self._dataProducts = [H5DataProduct(dataProductsGroup[g],g.split('/')[-1], compressor) for g in dataProductsGroup]
self._dbtags = self._group.attrs['db_tags']
self._time_type = self._group.attrs['time_type']
def record(self):
return self._record
def name(self):
return self._name
def time_type(self):
return self._time_type
def originalTagNames(self):
return self._dbtags
def iovsNPayloadNames(self):
#asking an h5 object for its name is a slow operation
idToName = {self._file['null_payload'].id: None}
for d in self._dataProducts:
idToName.update(d.idToPayloadNames())
first = self._group['first'][()]
last = self._group['last'][()]
payloadRefs = self._group['payload']
return list(zip( (IOVSyncValue(x['high'],x['low']) for x in first),
(IOVSyncValue(x['high'],x['low']) for x in last),
([idToName[self._file[r].id] for r in refs] for refs in payloadRefs)) )
def dataProducts(self):
return self._dataProducts
class H5GlobalTag(object):
def __init__(self, filename, name):
self._file = h5py.File(filename,'r')
self._name = name
def tags(self):
        # looking up names is slow, so it is better to build a cache
tagID2Name = {}
recordsGroup = self._file['Records']
for recordName in recordsGroup:
r = recordsGroup[recordName]
tagsGroup = r['Tags']
for tagName in tagsGroup:
tagID2Name[tagsGroup[tagName].id] = tagName
globalTagGroup = self._file['GlobalTags'][self._name]
return (H5Tag(self._file, self._file[t], tagID2Name[self._file[t].id]) for t in globalTagGroup['Tags'])
def main():
parser = argparse.ArgumentParser(description='Read from HDF5 file and write to HDF5 file')
parser.add_argument('input', help="Name of file to read")
parser.add_argument('name', nargs='+', help="Name of the global tag.")
    parser.add_argument('--exclude', '-e', nargs='*', help='list of records to exclude from the file (cannot be used with --include)')
    parser.add_argument('--include', '-i', nargs='*', help='list of the only records that should be included in the file (cannot be used with --exclude)')
parser.add_argument('--output', '-o', default='test.h5cond', help='name of hdf5 output file to write')
parser.add_argument('--compressor', '-c', default='zlib', choices=['zlib', 'lzma', 'none'], help="compress data using 'zlib', 'lzma' or 'none'")
args = parser.parse_args()
if args.exclude and args.include:
print("Can not use --exclude and --include at the same time")
exit(-1)
excludeRecords = set()
if args.exclude:
excludeRecords = set(args.exclude)
includeRecords = set()
if args.include:
includeRecords = set(args.include)
writeH5File(args.output, args.name, excludeRecords, includeRecords, lambda x: H5GlobalTag(args.input, x), args.compressor)
if __name__ == '__main__':
main()
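# Example invocation (illustrative; the file and tag names are hypothetical):
#
#     condhdf5tohdf5.py input.h5cond MyGlobalTag -o out.h5cond -c lzma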
|
5d61c32bcfc60573f79893c7ef237c5e5f909424
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoTracker/SiTrackerMRHTools/python/SiTrackerMultiRecHitUpdator_cfi.py
|
1c68e75127c60c25a2b54632d350e558f1679f7d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
SiTrackerMultiRecHitUpdator_cfi.py
|
import FWCore.ParameterSet.Config as cms
siTrackerMultiRecHitUpdator = cms.ESProducer("SiTrackerMultiRecHitUpdatorESProducer",
ComponentName = cms.string('SiTrackerMultiRecHitUpdator'),
TTRHBuilder = cms.string('WithAngleAndTemplate'),
HitPropagator = cms.string('trackingRecHitPropagator'),
#AnnealingProgram = cms.vdouble(80.0, 9.0, 4.0, 1.0, 1.0, 1.0),
AnnealingProgram = cms.vdouble(30.0, 18.0, 14.0, 11.0, 6.0, 4.0, 2.0, 1.0),
ChiSquareCut1D = cms.double(10.8276),
ChiSquareCut2D = cms.double(13.8155),
Debug = cms.bool(False)
)
|
f88601ec913a399405997749144e8367643651cf
|
8bd6ebb097edf536db657c963247bbf493cbba52
|
/cmasher/colormaps/savanna/savanna.py
|
d1a88d54554b64148314511555e55b845d30f5ed
|
[
"BSD-3-Clause"
] |
permissive
|
1313e/CMasher
|
3a5f70b2238a2f4494de60d492c71b5fa1da1630
|
1e76ef900557e1a3aa40a7fe85a356d296f8379b
|
refs/heads/master
| 2023-08-22T10:51:44.778752
| 2021-11-30T12:20:02
| 2021-11-30T12:20:02
| 222,056,142
| 346
| 28
|
BSD-3-Clause
| 2023-08-07T16:59:40
| 2019-11-16T06:15:58
|
Python
|
UTF-8
|
Python
| false
| false
| 13,142
|
py
|
savanna.py
|
# %% IMPORTS
# Package imports
from matplotlib.cm import register_cmap
from matplotlib.colors import ListedColormap
# All declaration
__all__ = ['cmap']
# Author declaration
__author__ = "Ellert van der Velden (@1313e)"
# Package declaration
__package__ = 'cmasher'
# %% GLOBALS AND DEFINITIONS
# Type of this colormap
cm_type = 'sequential'
# RGB-values of this colormap
cm_data = [[0.00000000, 0.00000000, 0.00000000],
[0.00021127, 0.00023624, 0.00030051],
[0.00071521, 0.00082262, 0.00108605],
[0.00144756, 0.00171107, 0.00233599],
[0.00237423, 0.00288202, 0.00405559],
[0.00347011, 0.00432429, 0.00625646],
[0.00471439, 0.00603096, 0.00895301],
[0.00608853, 0.00799769, 0.01216114],
[0.00757533, 0.01022195, 0.01589760],
[0.00915818, 0.01270258, 0.02017971],
[0.01082054, 0.01543959, 0.02502552],
[0.01254643, 0.01843387, 0.03045152],
[0.01431862, 0.02168740, 0.03647625],
[0.01612043, 0.02520280, 0.04302171],
[0.01793380, 0.02898372, 0.04959648],
[0.01973977, 0.03303468, 0.05615287],
[0.02151899, 0.03736103, 0.06269190],
[0.02325050, 0.04192759, 0.06921411],
[0.02491215, 0.04648911, 0.07571838],
[0.02648137, 0.05103331, 0.08220096],
[0.02793261, 0.05556664, 0.08865845],
[0.02924088, 0.06009502, 0.09508342],
[0.03037902, 0.06462446, 0.10146755],
[0.03131958, 0.06916070, 0.10779956],
[0.03203545, 0.07370928, 0.11406515],
[0.03249903, 0.07827565, 0.12024779],
[0.03268731, 0.08286440, 0.12632612],
[0.03258077, 0.08747954, 0.13227603],
[0.03216688, 0.09212389, 0.13807027],
[0.03144304, 0.09679869, 0.14367931],
[0.03041983, 0.10150308, 0.14907282],
[0.02912194, 0.10623389, 0.15422238],
[0.02758833, 0.11098570, 0.15910410],
[0.02587000, 0.11575112, 0.16370124],
[0.02402557, 0.12052155, 0.16800595],
[0.02211603, 0.12528804, 0.17201958],
[0.02019935, 0.13004222, 0.17575168],
[0.01832694, 0.13477690, 0.17921794],
[0.01654180, 0.13948637, 0.18243787],
[0.01487813, 0.14416649, 0.18543263],
[0.01336293, 0.14881441, 0.18822358],
[0.01201502, 0.15342871, 0.19083072],
[0.01084954, 0.15800864, 0.19327269],
[0.00987627, 0.16255434, 0.19556586],
[0.00910289, 0.16706634, 0.19772478],
[0.00853481, 0.17154554, 0.19976204],
[0.00817590, 0.17599309, 0.20168839],
[0.00802903, 0.18041026, 0.20351295],
[0.00809747, 0.18479827, 0.20524382],
[0.00838274, 0.18915858, 0.20688719],
[0.00888799, 0.19349238, 0.20844896],
[0.00961513, 0.19780105, 0.20993340],
[0.01056723, 0.20208576, 0.21134443],
[0.01174772, 0.20634764, 0.21268527],
[0.01316033, 0.21058773, 0.21395847],
[0.01480927, 0.21480706, 0.21516599],
[0.01669959, 0.21900650, 0.21630953],
[0.01883690, 0.22318688, 0.21739032],
[0.02122739, 0.22734893, 0.21840920],
[0.02387809, 0.23149333, 0.21936679],
[0.02679688, 0.23562065, 0.22026353],
[0.02999261, 0.23973134, 0.22109988],
[0.03347435, 0.24382590, 0.22187550],
[0.03725272, 0.24790463, 0.22259067],
[0.04131815, 0.25196786, 0.22324488],
[0.04546934, 0.25601577, 0.22383811],
[0.04968110, 0.26004859, 0.22436966],
[0.05395167, 0.26406634, 0.22483957],
[0.05827960, 0.26806915, 0.22524694],
[0.06266435, 0.27205699, 0.22559140],
[0.06710576, 0.27602979, 0.22587264],
[0.07160396, 0.27998749, 0.22608987],
[0.07615937, 0.28392995, 0.22624261],
[0.08077263, 0.28785698, 0.22633043],
[0.08544447, 0.29176839, 0.22635291],
[0.09017571, 0.29566392, 0.22630943],
[0.09496721, 0.29954333, 0.22619956],
[0.09981987, 0.30340630, 0.22602290],
[0.10473454, 0.30725251, 0.22577907],
[0.10971208, 0.31108163, 0.22546774],
[0.11475327, 0.31489330, 0.22508860],
[0.11985886, 0.31868712, 0.22464142],
[0.12502951, 0.32246272, 0.22412597],
[0.13026584, 0.32621969, 0.22354204],
[0.13556839, 0.32995762, 0.22288951],
[0.14093760, 0.33367608, 0.22216827],
[0.14637384, 0.33737466, 0.22137828],
[0.15187742, 0.34105292, 0.22051952],
[0.15744854, 0.34471042, 0.21959202],
[0.16308735, 0.34834673, 0.21859585],
[0.16879389, 0.35196142, 0.21753112],
[0.17456816, 0.35555406, 0.21639796],
[0.18041009, 0.35912420, 0.21519651],
[0.18631953, 0.36267144, 0.21392696],
[0.19229627, 0.36619534, 0.21258955],
[0.19834006, 0.36969548, 0.21118451],
[0.20445056, 0.37317146, 0.20971211],
[0.21062740, 0.37662288, 0.20817264],
[0.21687017, 0.38004933, 0.20656639],
[0.22317839, 0.38345043, 0.20489369],
[0.22955155, 0.38682581, 0.20315486],
[0.23598910, 0.39017509, 0.20135027],
[0.24249047, 0.39349791, 0.19948024],
[0.24905505, 0.39679392, 0.19754511],
[0.25568221, 0.40006279, 0.19554522],
[0.26237127, 0.40330417, 0.19348093],
[0.26912156, 0.40651776, 0.19135258],
[0.27593236, 0.40970325, 0.18916052],
[0.28280294, 0.41286033, 0.18690507],
[0.28973256, 0.41598873, 0.18458655],
[0.29672048, 0.41908816, 0.18220526],
[0.30376592, 0.42215835, 0.17976151],
[0.31086810, 0.42519907, 0.17725556],
[0.31802622, 0.42821006, 0.17468769],
[0.32523950, 0.43119110, 0.17205813],
[0.33250712, 0.43414196, 0.16936710],
[0.33982828, 0.43706245, 0.16661482],
[0.34720217, 0.43995234, 0.16380141],
[0.35462797, 0.44281148, 0.16092708],
[0.36210483, 0.44563967, 0.15799196],
[0.36963193, 0.44843677, 0.15499618],
[0.37720843, 0.45120262, 0.15193985],
[0.38483348, 0.45393709, 0.14882307],
[0.39250625, 0.45664004, 0.14564590],
[0.40022589, 0.45931137, 0.14240845],
[0.40799152, 0.46195098, 0.13911080],
[0.41580230, 0.46455879, 0.13575305],
[0.42365735, 0.46713471, 0.13233534],
[0.43155579, 0.46967870, 0.12885784],
[0.43949674, 0.47219070, 0.12532076],
[0.44747929, 0.47467069, 0.12172447],
[0.45550252, 0.47711867, 0.11806944],
[0.46356547, 0.47953465, 0.11435629],
[0.47166721, 0.48191865, 0.11058588],
[0.47980676, 0.48427072, 0.10675931],
[0.48798313, 0.48659092, 0.10287804],
[0.49619527, 0.48887937, 0.09894406],
[0.50444210, 0.49113618, 0.09495995],
[0.51272252, 0.49336150, 0.09092904],
[0.52103535, 0.49555554, 0.08685563],
[0.52937937, 0.49771852, 0.08274531],
[0.53775329, 0.49985071, 0.07860522],
[0.54615576, 0.50195242, 0.07444453],
[0.55458555, 0.50402395, 0.07027455],
[0.56304105, 0.50606574, 0.06611039],
[0.57152052, 0.50807833, 0.06197130],
[0.58002218, 0.51006230, 0.05788155],
[0.58854454, 0.51201813, 0.05387095],
[0.59708528, 0.51394668, 0.04997862],
[0.60564203, 0.51584887, 0.04625303],
[0.61421304, 0.51772537, 0.04275204],
[0.62279496, 0.51957762, 0.03953971],
[0.63138544, 0.52140664, 0.03676688],
[0.63998086, 0.52321408, 0.03455383],
[0.64857818, 0.52500137, 0.03294406],
[0.65717288, 0.52677070, 0.03198783],
[0.66576123, 0.52852395, 0.03173842],
[0.67433798, 0.53026382, 0.03225684],
[0.68289757, 0.53199323, 0.03361012],
[0.69143396, 0.53371544, 0.03587258],
[0.69994048, 0.53543414, 0.03912678],
[0.70840920, 0.53715377, 0.04335234],
[0.71683136, 0.53887936, 0.04837996],
[0.72519716, 0.54061662, 0.05415299],
[0.73349564, 0.54237205, 0.06060590],
[0.74171471, 0.54415296, 0.06767858],
[0.74984067, 0.54596772, 0.07531845],
[0.75785844, 0.54782574, 0.08348074],
[0.76575166, 0.54973737, 0.09212776],
[0.77350301, 0.55171376, 0.10122782],
[0.78109366, 0.55376723, 0.11075252],
[0.78850445, 0.55591059, 0.12067586],
[0.79571606, 0.55815698, 0.13097232],
[0.80270968, 0.56051955, 0.14161530],
[0.80946781, 0.56301089, 0.15257625],
[0.81597527, 0.56564237, 0.16382468],
[0.82221959, 0.56842384, 0.17532697],
[0.82819208, 0.57136288, 0.18704833],
[0.83388796, 0.57446464, 0.19895314],
[0.83930644, 0.57773167, 0.21100550],
[0.84445059, 0.58116396, 0.22317068],
[0.84932690, 0.58475913, 0.23541639],
[0.85394468, 0.58851276, 0.24771383],
[0.85831524, 0.59241887, 0.26003856],
[0.86245136, 0.59647035, 0.27236790],
[0.86636658, 0.60065936, 0.28468421],
[0.87007489, 0.60497762, 0.29697136],
[0.87358983, 0.60941682, 0.30921950],
[0.87692479, 0.61396871, 0.32141887],
[0.88009251, 0.61862536, 0.33356258],
[0.88310482, 0.62337924, 0.34564636],
[0.88597340, 0.62822309, 0.35766469],
[0.88870865, 0.63315027, 0.36961556],
[0.89131974, 0.63815479, 0.38149950],
[0.89381598, 0.64323082, 0.39331424],
[0.89620613, 0.64837293, 0.40505793],
[0.89849692, 0.65357662, 0.41673367],
[0.90069559, 0.65883742, 0.42834111],
[0.90280874, 0.66415124, 0.43988061],
[0.90484231, 0.66951441, 0.45135307],
[0.90680168, 0.67492362, 0.46275990],
[0.90869164, 0.68037592, 0.47410291],
[0.91051770, 0.68586821, 0.48538131],
[0.91228480, 0.69139774, 0.49659512],
[0.91399479, 0.69696302, 0.50775074],
[0.91565443, 0.70256083, 0.51884273],
[0.91726467, 0.70819019, 0.52987805],
[0.91883159, 0.71384831, 0.54085209],
[0.92035518, 0.71953477, 0.55177259],
[0.92184253, 0.72524654, 0.56263236],
[0.92329389, 0.73098322, 0.57343774],
[0.92471179, 0.73674362, 0.58418996],
[0.92609946, 0.74252636, 0.59488875],
[0.92746172, 0.74832952, 0.60553081],
[0.92879879, 0.75415292, 0.61612103],
[0.93011328, 0.75999556, 0.62665970],
[0.93140767, 0.76585651, 0.63714716],
[0.93268434, 0.77173494, 0.64758382],
[0.93394557, 0.77763008, 0.65797012],
[0.93519353, 0.78354123, 0.66830656],
[0.93643033, 0.78946776, 0.67859365],
[0.93765802, 0.79540907, 0.68883193],
[0.93887857, 0.80136465, 0.69902193],
[0.94009555, 0.80733344, 0.70916176],
[0.94131079, 0.81331502, 0.71925214],
[0.94252508, 0.81930934, 0.72929525],
[0.94374027, 0.82531602, 0.73929162],
[0.94495845, 0.83133459, 0.74924136],
[0.94618604, 0.83736322, 0.75913865],
[0.94742056, 0.84340300, 0.76899008],
[0.94866392, 0.84945360, 0.77879603],
[0.94992365, 0.85551289, 0.78854940],
[0.95119750, 0.86158194, 0.79825632],
[0.95248826, 0.86766017, 0.80791609],
[0.95380283, 0.87374574, 0.81752297],
[0.95513783, 0.87984007, 0.82708439],
[0.95650313, 0.88594043, 0.83659117],
[0.95789582, 0.89204808, 0.84605033],
[0.95932448, 0.89816073, 0.85545487],
[0.96078883, 0.90427883, 0.86480892],
[0.96229754, 0.91040005, 0.87410627],
[0.96385087, 0.91652463, 0.88335138],
[0.96546071, 0.92264922, 0.89253589],
[0.96712741, 0.92877384, 0.90166658],
[0.96886707, 0.93489360, 0.91073462],
[0.97068667, 0.94100583, 0.91974718],
[0.97260803, 0.94710245, 0.92870598],
[0.97467260, 0.95316709, 0.93761771],
[0.97697694, 0.95916172, 0.94648703],
[0.97976901, 0.96500288, 0.95517802],
[0.98320766, 0.97067583, 0.96324868],
[0.98683577, 0.97636954, 0.97066284],
[0.99031830, 0.98216912, 0.97789331],
[0.99365089, 0.98805248, 0.98516675],
[0.99687019, 0.99399949, 0.99253395],
[1.00000000, 1.00000000, 1.00000000]]
# Create ListedColormap object for this colormap
cmap = ListedColormap(cm_data, name='cmr.savanna', N=256)
cmap_r = cmap.reversed()
# Register (reversed) cmap in MPL
register_cmap(cmap=cmap)
register_cmap(cmap=cmap_r)
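# Minimal usage sketch (illustrative): importing this module registers the
# colormap, after which Matplotlib can look it up by its registered name.
#
#     import matplotlib.pyplot as plt
#     import numpy as np
#     plt.imshow(np.random.rand(32, 32), cmap='cmr.savanna')
#     plt.colorbar()
#     plt.show()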
|
e2b46bb6c93b0e0e75c9e206f7db7d5236b4ca84
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/battle_results/reusable/economics.py
|
e52048ccc4ad11d6f74727c934d3bdfe4efd56c5
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 23,914
|
py
|
economics.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_results/reusable/economics.py
import typing
import itertools
from collections import namedtuple
from constants import PREMIUM_TYPE, ARENA_BONUS_TYPE
from gui.battle_results.reusable import shared
from gui.battle_results.reusable import records
from gui.battle_results.br_constants import PremiumState, FactorValue
from gui.shared.money import Currency
from debug_utils import LOG_ERROR
from dossiers2.custom.records import RECORD_DB_IDS
from helpers import dependency
from skeletons.gui.shared import IItemsCache
from skeletons.gui.lobby_context import ILobbyContext
from ValueReplay import ValueReplay, ValueReplayConnector
_CrystalDetails = namedtuple('_CrystalDetails', ('earned', 'expenses'))
def _createCrystalDetails(earned=None, expenses=0):
earned = earned if earned is not None else []
return _CrystalDetails(earned=earned, expenses=expenses)
class _AdditionalRecords(records.RawRecords):
__slots__ = ()
def __init__(self, results):
rawRecords = {}
repairCost = results.get('autoRepairCost')
if repairCost is not None:
rawRecords['autoRepairCost'] = -repairCost
autoLoadCost = results.get('autoLoadCost')
if autoLoadCost is not None:
rawRecords['autoLoadCredits'] = -autoLoadCost[0]
rawRecords['autoLoadGold'] = -autoLoadCost[1]
boostersCost = results.get('autoEquipBoostersCost')
if boostersCost is not None:
rawRecords['autoBoostersCredits'] = -boostersCost[0]
rawRecords['autoBoostersGold'] = -boostersCost[1]
rawRecords['autoBoostersCrystal'] = -boostersCost[2]
equipCost = results.get('autoEquipCost')
if equipCost is not None:
rawRecords['autoEquipCredits'] = self.__getAutoEquipCost(equipCost, boostersCost, 0)
rawRecords['autoEquipGold'] = self.__getAutoEquipCost(equipCost, boostersCost, 1)
if 'piggyBank' in results:
cost = results['piggyBank']
if cost is not None:
rawRecords['piggyBank'] = cost
super(_AdditionalRecords, self).__init__(rawRecords)
return
@classmethod
def __getAutoEquipCost(cls, equipCost, boosterCost, idx):
return -equipCost[idx] if boosterCost is None else -equipCost[idx] + boosterCost[idx]
class _CreditsReplayRecords(records.ReplayRecords):
__slots__ = ()
def __init__(self, replay, results, squadCreditsFactor=0):
super(_CreditsReplayRecords, self).__init__(replay, 'credits')
self._addRecord(ValueReplay.SUB, 'originalCreditsToDraw', results['originalCreditsToDraw'], 0)
self._addRecord(ValueReplay.SET, 'achievementCredits', results['achievementCredits'], 0)
self._addRecord(ValueReplay.FACTOR, 'premSquadCreditsFactor100', squadCreditsFactor, 0)
self._addRecord(ValueReplay.SUBCOEFF, 'originalCreditsToDrawSquad', results['originalCreditsToDrawSquad'], results['originalCreditsToDrawSquad'] * self.getFactor('premSquadCreditsFactor100') * -1)
def _getRecord(self, name):
value = super(_CreditsReplayRecords, self)._getRecord(name)
if name in ('originalCreditsToDraw', 'achievementCredits'):
value = records.makeReplayValueRound(value * self.getFactor('appliedPremiumCreditsFactor100'))
return value
class _CrystalRecords(records.RawRecords):
__slots__ = ()
def __init__(self, replay, results):
rawRecords = {}
eventToken = 'eventCrystalList_'
eventsCrystals = 0
for _, (appliedName, appliedValue), (_, _) in replay:
if appliedName == 'originalCrystal' and appliedValue:
rawRecords[appliedName] = appliedValue
if appliedName.startswith(eventToken):
eventsCrystals += appliedValue
if eventsCrystals:
rawRecords['events'] = eventsCrystals
if 'autoEquipCost' in results:
cost = results['autoEquipCost']
if cost is not None:
rawRecords['autoEquipCrystals'] = -cost[2]
super(_CrystalRecords, self).__init__(rawRecords)
return
class _XPReplayRecords(records.ReplayRecords):
__slots__ = ()
def __init__(self, replay, isHighScope, achievementXP):
super(_XPReplayRecords, self).__init__(replay, 'xp')
if isHighScope:
self._addRecord(ValueReplay.SET, 'isHighScope', 1, 0)
self._addRecord(ValueReplay.SET, 'achievementXP', achievementXP, 0)
self._addRecord(ValueReplay.SET, 'xpToShow', max(0, self.getRecord('xp')), 0)
def _getRecord(self, name):
value = super(_XPReplayRecords, self)._getRecord(name)
if name in ('achievementXP',):
value = records.makeReplayValueRound(value * self.getFactor('appliedPremiumXPFactor100'))
return value
class _FreeXPReplayRecords(records.ReplayRecords):
__slots__ = ()
def __init__(self, replay, achievementFreeXP):
super(_FreeXPReplayRecords, self).__init__(replay, 'freeXP')
self._addRecord(ValueReplay.SET, 'achievementFreeXP', achievementFreeXP, 0)
def _getRecord(self, name):
value = super(_FreeXPReplayRecords, self)._getRecord(name)
if name in ('achievementFreeXP',):
value = records.makeReplayValueRound(value * self.getFactor('appliedPremiumXPFactor100'))
return value
class _EconomicsRecordsChains(object):
__slots__ = ('_baseCredits', '_premiumCredits', '_premiumPlusCredits', '_goldRecords', '_additionalRecords', '_baseXP', '_premiumXP', '_premiumPlusXP', '_baseXPAdd', '_premiumXPAdd', '_premiumPlusXPAdd', '_baseFreeXP', '_premiumFreeXP', '_premiumPlusFreeXP', '_baseFreeXPAdd', '_premiumFreeXPAdd', '_premiumPlusFreeXPAdd', '_crystal', '_crystalDetails')
def __init__(self):
super(_EconomicsRecordsChains, self).__init__()
self._baseCredits = records.RecordsIterator()
self._premiumCredits = records.RecordsIterator()
self._premiumPlusCredits = records.RecordsIterator()
self._goldRecords = records.RecordsIterator()
self._additionalRecords = records.RecordsIterator()
self._baseXP = records.RecordsIterator()
self._premiumXP = records.RecordsIterator()
self._premiumPlusXP = records.RecordsIterator()
self._baseXPAdd = records.RecordsIterator()
self._premiumXPAdd = records.RecordsIterator()
self._premiumPlusXPAdd = records.RecordsIterator()
self._baseFreeXP = records.RecordsIterator()
self._premiumFreeXP = records.RecordsIterator()
self._premiumPlusFreeXP = records.RecordsIterator()
self._baseFreeXPAdd = records.RecordsIterator()
self._premiumFreeXPAdd = records.RecordsIterator()
self._premiumPlusFreeXPAdd = records.RecordsIterator()
self._crystal = records.RecordsIterator()
self._crystalDetails = records.RecordsIterator()
def getBaseCreditsRecords(self):
return self._baseCredits
def getPremiumCreditsRecords(self):
return self._premiumCredits
def getCreditsDiff(self):
return self._premiumCredits.getRecord('credits', 'originalCreditsToDraw') - self._baseCredits.getRecord('credits', 'originalCreditsToDraw')
def getBaseXPRecords(self):
return self._baseXP
def getPremiumXPRecords(self):
return self._premiumXP
def getPremiumXPAddRecords(self):
return self._premiumXPAdd
def getMoneyRecords(self, premiumType=PREMIUM_TYPE.NONE):
if premiumType == PREMIUM_TYPE.NONE or premiumType & (PREMIUM_TYPE.VIP | PREMIUM_TYPE.PLUS):
resultPremiumData = self._premiumPlusCredits
else:
resultPremiumData = self._premiumCredits
return itertools.izip(self._baseCredits, resultPremiumData, self._goldRecords, self._additionalRecords)
def getCrystalRecords(self):
return itertools.izip(self._crystal, self._crystal)
def getUnpackedCrystalRecords(self):
return self._crystal
def getCrystalDetails(self):
return self._crystalDetails
def haveCrystalsChanged(self):
spent = self._additionalRecords.getRecord('autoBoostersCrystal')
received = self._crystal.getRecord('originalCrystal')
return any((spent, received)) or self._crystalDetails
def getXPRecords(self, premiumType=PREMIUM_TYPE.NONE, addBonusApplied=False):
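        # Select the XP/free-XP chains: no premium or a Plus/VIP mask reads the
        # premium-plus chains, plain basic premium reads the basic chains; the
        # "*Add" variants carry the additional post-battle XP bonus.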
if premiumType == PREMIUM_TYPE.NONE or premiumType & (PREMIUM_TYPE.VIP | PREMIUM_TYPE.PLUS):
if addBonusApplied:
secondXPData = self._premiumPlusXPAdd
secondFreeXPData = self._premiumPlusFreeXPAdd
else:
secondXPData = self._premiumPlusXP
secondFreeXPData = self._premiumPlusFreeXP
elif addBonusApplied:
secondXPData = self._premiumXPAdd
secondFreeXPData = self._premiumFreeXPAdd
else:
secondXPData = self._premiumXP
secondFreeXPData = self._premiumFreeXP
if addBonusApplied:
firstXPData = self._baseXPAdd
firstFreeXPData = self._baseFreeXPAdd
else:
firstXPData = self._baseXP
firstFreeXPData = self._baseFreeXP
return itertools.izip(firstXPData, secondXPData, firstFreeXPData, secondFreeXPData)
def getXPDiff(self):
return self._premiumXP.getRecord('xp') - self._baseXP.getRecord('xp')
def addResults(self, _, results):
connector = ValueReplayConnector(results)
self._addMoneyResults(connector, results)
self._addXPResults(connector, results)
self._addCrystalResults(connector, results)
def _addMoneyResults(self, connector, results):
if 'creditsReplay' in results and results['creditsReplay'] is not None:
replay = ValueReplay(connector, recordName='credits', replay=results['creditsReplay'])
appliedPremiumCreditsFactor100Exists = 'appliedPremiumCreditsFactor100' in replay
if appliedPremiumCreditsFactor100Exists:
replay['appliedPremiumCreditsFactor100'] = FactorValue.BASE_CREDITS_FACTOR
self._baseCredits.addRecords(self.__buildCreditsReplayForPremType(PREMIUM_TYPE.NONE, results, replay))
if appliedPremiumCreditsFactor100Exists:
replay['appliedPremiumCreditsFactor100'] = results['premiumCreditsFactor100']
self._premiumCredits.addRecords(self.__buildCreditsReplayForPremType(PREMIUM_TYPE.BASIC, results, replay))
if appliedPremiumCreditsFactor100Exists:
replay['appliedPremiumCreditsFactor100'] = results['premiumPlusCreditsFactor100']
self._premiumPlusCredits.addRecords(self.__buildCreditsReplayForPremType(PREMIUM_TYPE.PLUS, results, replay))
else:
LOG_ERROR('Credits replay is not found', results)
if 'goldReplay' in results and results['goldReplay'] is not None:
replay = ValueReplay(connector, recordName='gold', replay=results['goldReplay'])
self._goldRecords.addRecords(records.ReplayRecords(replay, 'gold'))
else:
LOG_ERROR('Gold replay is not found', results)
self._additionalRecords.addRecords(_AdditionalRecords(results))
return
def _addXPResults(self, connector, results):
premiumType = results.get('premMask', PREMIUM_TYPE.NONE)
hasPremiumPlus = bool(premiumType & PREMIUM_TYPE.PLUS)
if 'xpReplay' in results and results['xpReplay'] is not None:
replay = ValueReplay(connector, recordName='xp', replay=results['xpReplay'])
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=True)
isHighScope = RECORD_DB_IDS[('max15x15', 'maxXP')] in [ recordID for recordID, _ in results.get('dossierPopUps', []) ]
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.NONE)
self._baseXP.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.BASIC)
self._premiumXP.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.PLUS)
self._premiumPlusXP.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=hasPremiumPlus)
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.NONE)
self._baseXPAdd.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.BASIC)
self._premiumXPAdd.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=False)
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.PLUS)
self._premiumPlusXPAdd.addRecords(_XPReplayRecords(replay, isHighScope, results['achievementXP']))
else:
LOG_ERROR('XP replay is not found', results)
if 'freeXPReplay' in results and results['freeXPReplay'] is not None:
replay = ValueReplay(connector, recordName='freeXP', replay=results['freeXPReplay'])
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=True)
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.NONE)
self._baseFreeXP.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.BASIC)
self._premiumFreeXP.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.PLUS)
self._premiumPlusFreeXP.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=hasPremiumPlus)
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.NONE)
self._baseFreeXPAdd.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.BASIC)
self._premiumFreeXPAdd.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
self.__updateAdditionalFactorFromReplay(replay, results, setDefault=False)
self.__updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.PLUS)
self._premiumPlusFreeXPAdd.addRecords(_FreeXPReplayRecords(replay, results['achievementFreeXP']))
else:
LOG_ERROR('Free XP replay is not found', results)
return
def _addCrystalResults(self, connector, results):
if 'crystalReplay' in results and results['crystalReplay'] is not None:
replay = ValueReplay(connector, recordName=Currency.CRYSTAL, replay=results['crystalReplay'])
self._crystal.addRecords(records.ReplayRecords(replay, Currency.CRYSTAL))
self._crystalDetails.addRecords(_CrystalRecords(replay, results))
else:
LOG_ERROR('crystalReplay is not found', results)
return
def __buildCreditsReplayForPremType(self, targetPremiumType, results, replay):
initialSquadFactor = results['premSquadCreditsFactor100']
squadCreditsFactor = self.__getPremiumSquadCreditsFactor(results, targetPremiumType)
results['premSquadCreditsFactor100'] = squadCreditsFactor
creditsReplayToUse = _CreditsReplayRecords(replay, results, squadCreditsFactor)
results['premSquadCreditsFactor100'] = initialSquadFactor
return creditsReplayToUse
@staticmethod
def __updateAdditionalFactorFromReplay(replay, results, setDefault=False):
if 'additionalXPFactor10' not in replay:
return
if setDefault:
if 'dailyXPFactor10' in replay:
replay['additionalXPFactor10'] = FactorValue.ADDITIONAL_BONUS_ZERO_FACTOR
else:
replay['additionalXPFactor10'] = FactorValue.ADDITIONAL_BONUS_ONE_FACTOR
else:
replay['additionalXPFactor10'] = results['additionalXPFactor10']
@staticmethod
def __updatePremiumXPFactor(replay, results, premType=PREMIUM_TYPE.NONE):
if 'appliedPremiumXPFactor100' not in replay:
return
if premType == PREMIUM_TYPE.PLUS:
replay['appliedPremiumXPFactor100'] = results['premiumPlusXPFactor100']
elif premType == PREMIUM_TYPE.BASIC:
replay['appliedPremiumXPFactor100'] = results['premiumXPFactor100']
else:
replay['appliedPremiumXPFactor100'] = FactorValue.BASE_XP_FACTOR
@staticmethod
@dependency.replace_none_kwargs(lobbyContext=ILobbyContext)
def __getPremiumSquadCreditsFactor(results, targetPremiumType, lobbyContext=None):
premiumType = PREMIUM_TYPE.activePremium(results.get('premMask', PREMIUM_TYPE.NONE))
if targetPremiumType > premiumType:
return lobbyContext.getServerSettings().squadPremiumBonus.ownCredits * 100
return 0 if targetPremiumType < premiumType else results.get('premSquadCreditsFactor100', 0)
class EconomicsInfo(shared.UnpackedInfo):
__slots__ = ('__isAddXPBonusApplied', '__premiumMask', '__premiumState', '__premiumPlusState', '_economicsRecords')
__itemsCache = dependency.descriptor(IItemsCache)
__lobbyContext = dependency.descriptor(ILobbyContext)
def __init__(self, personal):
super(EconomicsInfo, self).__init__()
self.__isAddXPBonusApplied = False
self.__premiumMask = 0
self.__premiumState = PremiumState.NONE
self.__premiumPlusState = PremiumState.NONE
self._economicsRecords = _EconomicsRecordsChains()
if not self.hasUnpackedItems():
self.__collectRequiredData(personal)
@property
def hasAnyPremium(self):
return bool(self.__premiumMask & PREMIUM_TYPE.ANY)
@property
def isPremium(self):
return bool(PREMIUM_TYPE.activePremium(self.__premiumMask) & PREMIUM_TYPE.BASIC)
@property
def isPremiumPlus(self):
return bool(PREMIUM_TYPE.activePremium(self.__premiumMask) & PREMIUM_TYPE.PLUS)
@property
def isPremiumVIP(self):
return bool(PREMIUM_TYPE.activePremium(self.__premiumMask) & PREMIUM_TYPE.VIP)
@property
def isAddXPBonusApplied(self):
return self.__isAddXPBonusApplied
@isAddXPBonusApplied.setter
def isAddXPBonusApplied(self, state):
self.__isAddXPBonusApplied = state
@property
def premiumState(self):
return self.__premiumState
@premiumState.setter
def premiumState(self, state):
self.__premiumState = state
@property
def premiumPlusState(self):
return self.__premiumPlusState
@premiumPlusState.setter
def premiumPlusState(self, state):
self.__premiumPlusState = state
@property
def isPremiumBought(self):
return self.__premiumState & PremiumState.BOUGHT > 0
@property
def isPremiumPlusBought(self):
return self.__premiumPlusState & PremiumState.BOUGHT > 0
@property
def isPostBattlePremium(self):
return self.isPremium or self.isPremiumBought
@property
def isPostBattlePremiumPlus(self):
return self.isPremiumPlus or self.isPremiumPlusBought
def isActivePremiumPlus(self):
return self.__itemsCache.items.stats.isActivePremium(PREMIUM_TYPE.PLUS)
def getAppliedAdditionalCount(self):
return self.__itemsCache.items.stats.applyAdditionalXPCount
def canUpgradeToPremium(self, arenaBonusType):
return self.__premiumState & PremiumState.BUY_ENABLED > 0 and self.__premiumState & PremiumState.HAS_ALREADY == 0 and not self.isPostBattlePremium and arenaBonusType in (ARENA_BONUS_TYPE.REGULAR, ARENA_BONUS_TYPE.EPIC_RANDOM, ARENA_BONUS_TYPE.EPIC_BATTLE)
def canUpgradeToPremiumPlus(self, arenaBonusType):
return self.__premiumPlusState & PremiumState.BUY_ENABLED > 0 and self.__premiumPlusState & PremiumState.HAS_ALREADY == 0 and not self.isPostBattlePremiumPlus and arenaBonusType in (ARENA_BONUS_TYPE.REGULAR, ARENA_BONUS_TYPE.EPIC_RANDOM, ARENA_BONUS_TYPE.EPIC_BATTLE)
def getPremiumType(self):
premiumType = PREMIUM_TYPE.NONE
if self.isPremiumPlus:
premiumType = PREMIUM_TYPE.PLUS
elif self.isPremium:
premiumType = PREMIUM_TYPE.BASIC
return premiumType
def getActivePremiumType(self):
hasPremiumPlus = self.__itemsCache.items.stats.isActivePremium(PREMIUM_TYPE.PLUS)
hasBasicPremium = self.__itemsCache.items.stats.isActivePremium(PREMIUM_TYPE.BASIC)
premiumType = PREMIUM_TYPE.NONE
if hasPremiumPlus:
premiumType = PREMIUM_TYPE.PLUS
elif hasBasicPremium:
premiumType = PREMIUM_TYPE.BASIC
return premiumType
def getBaseCreditsRecords(self):
return self._economicsRecords.getBaseCreditsRecords()
def getPremiumCreditsRecords(self):
return self._economicsRecords.getPremiumCreditsRecords()
def getCreditsDiff(self):
return self._economicsRecords.getCreditsDiff()
def getMoneyRecords(self):
return self._economicsRecords.getMoneyRecords(self.__premiumMask)
def getCrystalRecords(self):
return self._economicsRecords.getCrystalRecords()
def getUnpackedCrystalRecords(self):
return self._economicsRecords.getUnpackedCrystalRecords()
def haveCrystalsChanged(self):
return self._economicsRecords.haveCrystalsChanged()
def getBaseXPRecords(self):
return self._economicsRecords.getBaseXPRecords()
def getPremiumXPRecords(self):
return self._economicsRecords.getPremiumXPRecords()
def getPremiumXPAddRecords(self):
return self._economicsRecords.getPremiumXPAddRecords()
def getXPRecords(self):
return self._economicsRecords.getXPRecords(self.__premiumMask, self.__isAddXPBonusApplied)
def getXPDiff(self):
return self._economicsRecords.getXPDiff()
def getCrystalDetailsRecords(self):
return self._economicsRecords.getCrystalDetails()
def getXPToShow(self, isDiffShow=False):
values = []
for xpRecords in self.getXPRecords():
baseXP, premiumXP = xpRecords[:2]
xp = premiumXP.getRecord('xpToShow')
value = xp - baseXP.getRecord('xpToShow') if isDiffShow else xp
values.append(value)
return values
def getCreditsToShow(self, isDiffShow=False):
values = []
for creditRecords in self.getMoneyRecords():
baseCredits, premiumCredits = creditRecords[:2]
value = premiumCredits.getRecord('credits', 'originalCreditsToDraw')
if isDiffShow and value > 0:
value -= baseCredits.getRecord('credits', 'originalCreditsToDraw')
values.append(value)
return values
def __collectRequiredData(self, info):
getItemByCD = self.__itemsCache.items.getItemByCD
itemCDs = [ key for key in info.keys() if isinstance(key, int) ]
items = sorted((getItemByCD(itemCD) for itemCD in itemCDs))
for item in items:
intCD = item.intCD
data = info[intCD]
if data is None:
self._addUnpackedItemID(intCD)
continue
self._economicsRecords.addResults(intCD, data)
self.__premiumMask = data.get('premMask', PREMIUM_TYPE.NONE)
return
|
474e694a47d4b0fa66865715ac0a4e90c210b698
|
493d86071bb96ef33a38623a144fe55e49a0aa44
|
/socceraction/data/opta/parsers/f9_json.py
|
5d6b66966fcb0de68d90cfafb5f41a9d0c880979
|
[
"MIT"
] |
permissive
|
ML-KULeuven/socceraction
|
3b8d2411333114436239850d02278785ea0ed83b
|
1261a31cf99d0c9a819b67f568549aa47df83f08
|
refs/heads/master
| 2023-08-31T02:25:42.349813
| 2023-08-30T08:53:01
| 2023-08-30T08:53:01
| 194,881,505
| 517
| 131
|
MIT
| 2023-09-14T09:46:42
| 2019-07-02T14:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 11,898
|
py
|
f9_json.py
|
"""JSON parser for Opta F9 feeds."""
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from ...base import MissingDataError
from .base import OptaJSONParser, assertget
class F9JSONParser(OptaJSONParser):
"""Extract data from a Opta F9 data stream.
Parameters
----------
path : str
Path of the data file.
"""
def _get_feed(self) -> Dict[str, Any]:
for node in self.root:
if "OptaFeed" in node["data"].keys():
return node
raise MissingDataError
def _get_doc(self) -> Dict[str, Any]:
f9 = self._get_feed()
data = assertget(f9, "data")
optafeed = assertget(data, "OptaFeed")
optadocument = assertget(optafeed, "OptaDocument")[0]
return optadocument
def _get_stats(self, obj: Dict[str, Any]) -> Dict[str, Any]:
if "Stat" not in obj:
return {}
stats = {}
statobj = obj["Stat"] if isinstance(obj["Stat"], list) else [obj["Stat"]]
for stat in statobj:
            stats[stat["@attributes"]["Type"]] = stat["@value"]
return stats
def _get_name(self, obj: Dict[str, Any]) -> Optional[str]:
if "Known" in obj and obj["Known"].strip():
return obj["Known"]
if "First" in obj and "Last" in obj and obj["Last"].strip() or obj["First"].strip():
return (obj["First"] + " " + obj["Last"]).strip()
return None
def extract_games(self) -> Dict[int, Dict[str, Any]]:
"""Return a dictionary with all available games.
Returns
-------
dict
A mapping between game IDs and the information available about
each game in the data stream.
"""
optadocument = self._get_doc()
attr = assertget(optadocument, "@attributes")
matchdata = assertget(optadocument, "MatchData")
competition = assertget(optadocument, "Competition")
competitionstat = self._get_stats(competition)
venue = assertget(optadocument, "Venue")
matchofficial = assertget(matchdata, "MatchOfficial")
matchinfo = assertget(matchdata, "MatchInfo")
matchstat = self._get_stats(matchdata)
teamdata = assertget(matchdata, "TeamData")
scores = {}
for t in teamdata:
scores[t["@attributes"]["Side"]] = t["@attributes"]["Score"]
game_id = int(assertget(attr, "uID")[1:])
game_dict = dict(
# Fields required by the base schema
game_id=game_id,
competition_id=int(assertget(assertget(competition, "@attributes"), "uID")[1:]),
season_id=assertget(competitionstat, "season_id"),
game_day=competitionstat["matchday"] if "matchday" in competitionstat else None,
game_date=datetime.strptime(assertget(matchinfo, "Date"), "%Y%m%dT%H%M%S%z").replace(
tzinfo=None
),
# home_team_id=see below
# away_team_id=see below
# Optional fields
home_score=int(scores["Home"]),
away_score=int(scores["Away"]),
duration=int(assertget(matchstat, "match_time")),
referee=self._get_name(matchofficial["OfficialName"])
if "OfficialName" in matchofficial
else None,
venue=venue["Name"] if "Name" in venue else None,
attendance=int(matchinfo["Attendance"]) if "Attendance" in matchinfo else None,
# home_manager=see below
# away_manager=see below
)
for team in teamdata:
teamattr = assertget(team, '@attributes')
side = assertget(teamattr, 'Side')
teamid = assertget(teamattr, 'TeamRef')
score = assertget(teamattr, 'Score')
manager = (
self._get_name(team["TeamOfficial"]["PersonName"])
if "TeamOfficial" in team
else None
)
if side == 'Home':
game_dict['home_team_id'] = int(teamid[1:])
game_dict['home_score'] = int(score)
game_dict['home_manager'] = manager
else:
game_dict['away_team_id'] = int(teamid[1:])
game_dict['away_score'] = int(score)
game_dict['away_manager'] = manager
return {game_id: game_dict}
def extract_teams(self) -> Dict[int, Dict[str, Any]]:
"""Return a dictionary with all available teams.
Returns
-------
dict
A mapping between team IDs and the information available about
each team in the data stream.
"""
optadocument = self._get_doc()
root_teams = assertget(optadocument, "Team")
teams = {}
for team in root_teams:
if "id" in team.keys():
nameobj = team.get("nameObj")
team_id = int(team["id"])
teams[team_id] = dict(
# Fields required by the base schema
team_id=team_id,
team_name=nameobj.get("name"),
)
return teams
def extract_players(self) -> Dict[Tuple[int, int], Dict[str, Any]]:
"""Return a dictionary with all available players.
Returns
-------
dict
A mapping between (game ID, player ID) tuples and the information
available about each player in the data stream.
"""
optadocument = self._get_doc()
attr = assertget(optadocument, "@attributes")
game_id = int(assertget(attr, "uID")[1:])
root_teams = assertget(optadocument, "Team")
lineups = self.extract_lineups()
players = {}
for team in root_teams:
team_id = int(team["@attributes"]["uID"].replace("t", ""))
for player in team["Player"]:
player_id = int(player["@attributes"]["uID"].replace("p", ""))
assert "nameObj" in player["PersonName"]
nameobj = player["PersonName"]["nameObj"]
if not nameobj.get("is_unknown"):
player = dict(
# Fields required by the base schema
game_id=game_id,
team_id=team_id,
player_id=player_id,
player_name=self._get_name(player["PersonName"]),
# is_starter=
# minutes_played=
# jersey_number=
# Fields required by the opta schema
# starting_position=
# Optional fields
# height="?",
# weight="?",
# age="?",
)
if player_id in lineups[team_id]["players"]:
player = dict(
**player,
jersey_number=lineups[team_id]["players"][player_id]["jersey_number"],
starting_position=lineups[team_id]["players"][player_id][
"starting_position_name"
],
is_starter=lineups[team_id]["players"][player_id]["is_starter"],
minutes_played=lineups[team_id]["players"][player_id][
"minutes_played"
],
)
players[(game_id, player_id)] = player
return players
def extract_lineups(self) -> Dict[int, Dict[str, Any]]:
"""Return a dictionary with the lineup of each team.
Raises
------
MissingDataError
If teams data is not available in the stream.
Returns
-------
dict
A mapping between team IDs and the information available about
each team's lineup in the data stream.
"""
optadocument = self._get_doc()
attr = assertget(optadocument, "@attributes")
try:
rootf9 = optadocument["MatchData"]["TeamData"]
except KeyError as e:
raise MissingDataError from e
matchstats = optadocument["MatchData"]["Stat"]
matchstats = [matchstats] if isinstance(matchstats, dict) else matchstats
matchstatsdict = {stat["@attributes"]["Type"]: stat["@value"] for stat in matchstats}
lineups: Dict[int, Dict[str, Any]] = {}
for team in rootf9:
# lineup attributes
team_id = int(team["@attributes"]["TeamRef"].replace("t", ""))
lineups[team_id] = dict(players=dict())
# substitutes
subst = [s["@attributes"] for s in team["Substitution"]]
# red cards
red_cards = {
int(e["@attributes"]["PlayerRef"].replace("p", "")): e["@attributes"]["Time"]
for e in team.get("Booking", [])
if "CardType" in e["@attributes"]
and e["@attributes"]["CardType"] in ["Red", "SecondYellow"]
and "PlayerRef" in e["@attributes"] # not defined if a coach receives a red card
}
for player in team["PlayerLineUp"]["MatchPlayer"]:
attr = player["@attributes"]
player_id = int(attr["PlayerRef"].replace("p", ""))
playerstatsdict = {
stat["@attributes"]["Type"]: stat["@value"] for stat in player["Stat"]
}
sub_on = next(
(
item["Time"]
for item in subst
if "Retired" not in item and item["SubOn"] == f"p{player_id}"
),
matchstatsdict["match_time"] if attr["Status"] == "Sub" else 0,
)
sub_off = next(
(item["Time"] for item in subst if item["SubOff"] == f"p{player_id}"),
matchstatsdict["match_time"]
if player_id not in red_cards
else red_cards[player_id],
)
minutes_played = sub_off - sub_on
lineups[team_id]["players"][player_id] = dict(
jersey_number=attr["ShirtNumber"],
starting_position_name=attr["Position"],
starting_position_id=attr["position_id"],
is_starter=attr["Status"] == "Start",
minutes_played=minutes_played,
**playerstatsdict,
)
return lineups
def extract_teamgamestats(self) -> List[Dict[str, Any]]:
"""Return some aggregated statistics of each team.
Raises
------
MissingDataError
If teams data is not available in the stream.
Returns
-------
list(dict)
A dictionary with aggregated team statistics for each team.
"""
optadocument = self._get_doc()
attr = assertget(optadocument, "@attributes")
game_id = int(assertget(attr, "uID")[1:])
try:
rootf9 = optadocument["MatchData"]["TeamData"]
except KeyError as e:
raise MissingDataError from e
teams_gamestats = []
for team in rootf9:
attr = team["@attributes"]
statsdict = self._get_stats(team)
team_gamestats = dict(
game_id=game_id,
team_id=int(attr["TeamRef"].replace("t", "")),
side=attr["Side"],
score=attr["Score"],
shootout_score=attr["ShootOutScore"],
**statsdict,
)
teams_gamestats.append(team_gamestats)
return teams_gamestats
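# Hedged usage sketch (not part of the original module); the feed path below is
# an illustrative assumption, not a file shipped with the package:
#
#     parser = F9JSONParser("f9_feed.json")
#     games = parser.extract_games()
#     lineups = parser.extract_lineups()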
|
7438a6554c52f8ef8f6930f8e44a0bd8c9012dc0
|
568a2667a1b6ec33a0dec9ac01844ef74e11ab2b
|
/landlab/utils/window_statistic.py
|
9d9fe118463b6eb83b9adcaf9bfb52a6b4e61c48
|
[
"MIT"
] |
permissive
|
landlab/landlab
|
0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728
|
1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b
|
refs/heads/master
| 2023-08-31T07:24:21.545523
| 2023-08-29T18:51:06
| 2023-08-29T18:51:06
| 19,599,383
| 326
| 313
|
MIT
| 2023-09-14T19:12:23
| 2014-05-09T04:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 8,643
|
py
|
window_statistic.py
|
"""Function to calculate node statistics in a moving window."""
import numpy as np
from landlab import FieldError
def calculate_window_statistic(
grid, field, func, search_radius, calc_on_closed_nodes=True, **kwargs
):
"""Calculate a statistic using a function within a search window.
.. note::
This only works on grid **nodes** (not other grid elements e.g. links) for
any :class:`~.ModelGrid` type.
This utility outputs an array of length equal to the grid's number of
nodes. Each element of the output array represents the node location in
the grid. The value of each element is a function of the nodes within the
search window surrounding that node location (see the model grid diagram
below).
The grid below contains six columns and five rows with cell spacing set
to 10 distance units. This utility iteratively evaluates all nodes in the
grid. The diagram shows evaluation of node ID 15 (marked ``x``). If the
search radius is set to 20, twice the cell spacing, each node marked with
a ``*`` is within the search window.
::
· · * · · ·
· * * * · ·
* * x * * ·
· * * * · ·
· · * · · ·
Increasing the search radius to 25 results in the following search window.
::
· * * * · ·
* * * * * ·
* * x * * ·
* * * * * ·
· * * * · ·
Decreasing the search radius to 15 results in the following search window.
::
· · · · · ·
· * * * · ·
· * x * · ·
· * * * · ·
· · · · · ·
The input field can be any field assigned to grid nodes (e.g.
"topographic__elevation") and the input function can be any function that
acts on the input field (e.g. "np.min" to find the minimum). The input
function may be user defined and may contain any number of inputs, which
are input as ``kwargs``.
For example, if the input field is "topographic__elevation" and the input
function is ``np.ptp`` (peak-to-peak, meaning max minus min value), then the
output at node 15 will be the maximum elevation within the search window
minus the minimum elevation within the search window (also known as relief).
The ``np.percentile`` function, however, requires not only the input field,
but also an input value to define the "q-th percentile" to be calculated.
This second input would be added as a ``kwarg`` (e.g. ``q=90``) at the end of
the inputs for :func:`~calculate_window_statistic`. Both of these scenarios are
shown in the examples below.
Parameters
----------
grid : ModelGrid
A Landlab ModelGrid.
field : string
An existing grid field on which to calculate the statistic of interest.
Must exist in grid.
func : function
The function that calculates the window statistic of *field*.
The first parameter of the function must be the values at nodes within
        the window, which are used to calculate the statistic for the
node under evaluation. Additional parameters of the function can be
passed with ``kwargs``.
search_radius : float
Radius of window within which the statistic is calculated.
calc_on_closed_nodes : boolean, optional
Toggle calculation over all nodes including closed nodes (``True``) or all
nodes except closed nodes (``False``).
kwargs : optional
Keyword arguments passed to *func* that are additional to the array of
node values within the search window.
Returns
-------
output : ndarray
Output array containing the calculated values of the statistic. Same
length as input field.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
    >>> from landlab.utils.window_statistic import calculate_window_statistic
>>> grid = RasterModelGrid((5, 6), xy_spacing=10.0)
>>> grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
>>> z = grid.add_zeros("topographic__elevation", at="node")
>>> z += np.arange(len(z))
Calculate relief using ``np.ptp`` function.
>>> relief = calculate_window_statistic(
... grid, "topographic__elevation", np.ptp, search_radius=15
... )
>>> grid.at_node["topographic__elevation"]
array([ 0., 1., 2., 3., 4., 5.,
6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23.,
24., 25., 26., 27., 28., 29.])
>>> relief
array([ 7., 8., 8., 8., 8., 7.,
13., 14., 14., 14., 14., 13.,
13., 14., 14., 14., 14., 13.,
13., 14., 14., 14., 14., 13.,
7., 8., 8., 8., 8., 7.])
Calculate relief using ``np.ptp`` function excluding closed nodes.
>>> relief = calculate_window_statistic(
... grid,
... "topographic__elevation",
    ... np.ptp, search_radius=15,
... calc_on_closed_nodes=False,
... )
>>> grid.at_node["topographic__elevation"]
array([ 0., 1., 2., 3., 4., 5.,
6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23.,
24., 25., 26., 27., 28., 29.])
>>> relief
array([ nan, nan, nan, nan, nan, nan,
7., 8., 8., 8., 8., 7.,
13., 14., 14., 14., 14., 13.,
7., 8., 8., 8., 8., 7.,
nan, nan, nan, nan, nan, nan])
Calculate 90th percentile using ``np.percentile`` function and ``kwargs``.
>>> perc_90 = calculate_window_statistic(
... grid,
... "topographic__elevation",
    ... np.percentile, search_radius=15,
... calc_on_closed_nodes=False,
... q=90
... )
>>> grid.at_node["topographic__elevation"]
array([ 0., 1., 2., 3., 4., 5.,
6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23.,
24., 25., 26., 27., 28., 29.])
>>> perc_90
array([ nan, nan, nan, nan, nan, nan,
12.7, 13.5, 14.5, 15.5, 16.5, 16.7,
18.5, 19.2, 20.2, 21.2, 22.2, 22.5,
18.7, 19.5, 20.5, 21.5, 22.5, 22.7,
nan, nan, nan, nan, nan, nan])
Calculate relief above 90th percentile elevation using a user-defined
function and ``kwargs``.
>>> def max_minus_percentile(elev, q):
... output = np.max(elev) - np.percentile(elev, q)
... return output
>>> rel_above_90th_perc = calculate_window_statistic(
... grid,
... "topographic__elevation",
... max_minus_percentile,
... search_radius=15,
... calc_on_closed_nodes=False,
... q=90,
... )
>>> grid.at_node["topographic__elevation"]
array([ 0., 1., 2., 3., 4., 5.,
6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16., 17.,
18., 19., 20., 21., 22., 23.,
24., 25., 26., 27., 28., 29.])
>>> rel_above_90th_perc
array([ nan, nan, nan, nan, nan, nan,
0.3, 0.5, 0.5, 0.5, 0.5, 0.3,
0.5, 0.8, 0.8, 0.8, 0.8, 0.5,
0.3, 0.5, 0.5, 0.5, 0.5, 0.3,
nan, nan, nan, nan, nan, nan])
"""
if field not in grid.at_node:
raise FieldError(f"A {field} field is required at the nodes of the input grid.")
# Create output array
output = np.zeros(grid.number_of_nodes)
    # Create arrays of x and y coords for input to "distance to point" calc
x_coord = grid.x_of_node
y_coord = grid.y_of_node
nodes_in_loop = grid.nodes.flatten()
nodes_to_include = np.ones(grid.number_of_nodes, dtype=bool)
if calc_on_closed_nodes is False:
closed_nodes = grid.status_at_node == grid.BC_NODE_IS_CLOSED
nodes_in_loop = nodes_in_loop[~closed_nodes]
nodes_to_include[closed_nodes] = False
output[closed_nodes] = np.NaN
# Calculate "dist to point" then local value at nodes within window.
for node in nodes_in_loop:
node_dist_to_point = grid.calc_distances_of_nodes_to_point(
(x_coord[node], y_coord[node])
)
nodes_in_window = np.all(
[node_dist_to_point <= search_radius, nodes_to_include], 0
)
values_in_window = grid.at_node[field][nodes_in_window]
output[node] = func(values_in_window, **kwargs)
return output
|
d2449e6fbaca8d369761f09997a6603b8c279e2e
|
e07fc7e64c91c4026d931528a41b67b3eb5918c2
|
/rl_agents/trainer/graphics.py
|
f5e6c79ab93537c2d53e78fdb29f4536bf08bdbe
|
[
"MIT"
] |
permissive
|
eleurent/rl-agents
|
18a854f753780d80945048c9a7fb4dc63ea885f6
|
63d3734123fffcfcc84d9fc84195256a94d74638
|
refs/heads/master
| 2023-08-15T05:05:13.132900
| 2023-07-25T10:12:06
| 2023-07-25T10:12:06
| 93,446,064
| 490
| 150
|
MIT
| 2023-08-07T13:16:32
| 2017-06-05T20:53:59
|
Python
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
graphics.py
|
from __future__ import division, print_function
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set()
class RewardViewer(object):
def __init__(self):
self.rewards = []
def update(self, reward):
self.rewards.append(reward)
self.display()
def display(self):
plt.figure(num='Rewards')
plt.clf()
plt.title('Total reward')
plt.xlabel('Episode')
plt.ylabel('Reward')
rewards = pd.Series(self.rewards)
means = rewards.rolling(window=100).mean()
plt.plot(rewards)
plt.plot(means)
plt.pause(0.001)
        plt.show(block=False)
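# Minimal usage sketch (illustrative, not part of the original module):
#
#     viewer = RewardViewer()
#     for episode_reward in (1.0, 2.5, 3.0):
#         viewer.update(episode_reward)   # appends the reward and redraws the plot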
|
27b72da12a82d98a13c42143da209e0955352063
|
fe85b4811c93510006b666858d6029156f167f89
|
/bin/mtag/audio-bpm.py
|
d7f2c1c54a2ca243538fcf08e12450dbac859f62
|
[
"MIT",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"GPL-1.0-only",
"GPL-3.0-only",
"GPL-2.0-only",
"Artistic-2.0"
] |
permissive
|
9001/copyparty
|
39207421ccdc501566105da562a168996e0f9b4c
|
48a3898aa692770735a926b0c18300d7da8b021f
|
refs/heads/hovudstraum
| 2023-08-18T15:19:36.934124
| 2023-08-16T19:57:19
| 2023-08-16T19:57:19
| 188,700,274
| 273
| 21
|
MIT
| 2023-08-09T20:50:27
| 2019-05-26T15:28:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
audio-bpm.py
|
#!/usr/bin/env python
import os
import sys
import vamp
import tempfile
import numpy as np
import subprocess as sp
from copyparty.util import fsenc
"""
dep: vamp
dep: beatroot-vamp
dep: ffmpeg
"""
# save beat timestamps to ".beats/filename.txt"
SAVE = False
def det(tf):
# fmt: off
sp.check_call([
b"ffmpeg",
b"-nostdin",
b"-hide_banner",
b"-v", b"fatal",
b"-y", b"-i", fsenc(sys.argv[1]),
b"-map", b"0:a:0",
b"-ac", b"1",
b"-ar", b"22050",
b"-t", b"360",
b"-f", b"f32le",
fsenc(tf)
])
# fmt: on
with open(tf, "rb") as f:
d = np.fromfile(f, dtype=np.float32)
try:
# 98% accuracy on jcore
c = vamp.collect(d, 22050, "beatroot-vamp:beatroot")
cl = c["list"]
except:
# fallback; 73% accuracy
plug = "vamp-example-plugins:fixedtempo"
c = vamp.collect(d, 22050, plug, parameters={"maxdflen": 40})
print(c["list"][0]["label"].split(" ")[0])
return
# throws if detection failed:
beats = [float(x["timestamp"]) for x in cl]
bds = [b - a for a, b in zip(beats, beats[1:])]
bds.sort()
n0 = int(len(bds) * 0.2)
n1 = int(len(bds) * 0.75) + 1
bds = bds[n0:n1]
bpm = sum(bds)
bpm = round(60 * (len(bds) / bpm), 2)
print(f"{bpm:.2f}")
if SAVE:
fdir, fname = os.path.split(sys.argv[1])
bdir = os.path.join(fdir, ".beats")
try:
os.mkdir(fsenc(bdir))
except:
pass
fp = os.path.join(bdir, fname) + ".txt"
with open(fsenc(fp), "wb") as f:
txt = "\n".join([f"{x:.2f}" for x in beats])
f.write(txt.encode("utf-8"))
def main():
with tempfile.NamedTemporaryFile(suffix=".pcm", delete=False) as f:
f.write(b"h")
tf = f.name
try:
det(tf)
except:
pass # mute
finally:
os.unlink(tf)
if __name__ == "__main__":
main()
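# Illustrative sketch of the tempo math in det() (added for exposition; the
# timestamps are hypothetical). It keeps roughly the 20th-75th percentile of
# inter-beat gaps and averages them into a BPM estimate:
#
#     beats = [0.0, 0.5, 1.0, 1.52, 2.0, 2.51]
#     bds = sorted(b - a for a, b in zip(beats, beats[1:]))
#     bds = bds[int(len(bds) * 0.2):int(len(bds) * 0.75) + 1]
#     bpm = round(60 * len(bds) / sum(bds), 2)   # ~119 for these gaps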
|
a8f0a3ac18fdba5b5fc2da221958540a8e1436b8
|
a88b87b745ba50287c0d44dc6ac06b4f3a27624a
|
/C++指标计算引擎/py版本/hqchartpy2_tushare_test.py
|
7d680626d648c72db987aac8b994edce76c3a4de
|
[
"Apache-2.0"
] |
permissive
|
jones2000/HQChart
|
1739d93300babbb3967d8cb1746bbcc49432a1ee
|
3e490ccc044aa41c240fbb9b913a764419fd9e99
|
refs/heads/master
| 2023-09-04T00:20:02.134641
| 2023-09-01T02:09:02
| 2023-09-01T02:09:02
| 125,995,921
| 2,675
| 716
|
Apache-2.0
| 2023-05-17T04:57:55
| 2018-03-20T09:51:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
hqchartpy2_tushare_test.py
|
#############################################################
## hqchartPy2 test cases for the tushare data adapter
##
##
#############################################################
from hqchartpy2_fast import FastHQChart,PERIOD_ID
from hqchartpy2_pandas import HQChartPy2Helper
from hqchartpy2_tushare import TushareHQChartData, TushareKLocalHQChartData, HQResultTest
from hqchartpy2_tushare_config import TushareConfig
import json
import time
import numpy as np
import pandas as pd
import datetime
import uuid
class HQSelectDataFrameResult():
def __init__(self):
        self.dfResult={}    # all execution results; key=symbol, value=DataFrame
        self.Error=[]
    # callback invoked on successful execution
    def RunSuccess(self, symbol, jsData, jobID):
        log="[HQSelectDataFrameResult::RunSuccess] {0} success".format(symbol)
        print (log)
        data=HQChartPy2Helper.JsonDataToPandas(jsData, symbol) # convert indicator JSON data to a DataFrame
        self.dfResult[symbol]=data
    # callback invoked on failed execution
    def RunFailed(self, code, symbol, error,jobID) :
        log="[HQSelectDataFrameResult::RunFailed] {0}\n{1} failed\n{2}".format(code, symbol,error)
        self.Error.append(error)
        print(log)
# test: run an indicator over a set of stocks
def RunIndexTest(runConfig):
    jsConfig = json.dumps(runConfig)    # run configuration
    hqData=TushareKLocalHQChartData(TushareConfig.TUSHARE_AUTHORIZATION_KEY,startDate=20200421, endDate=20201231, cachePath="test_data\\")  # instantiate the data class
    result=HQSelectDataFrameResult()    # instantiate the result receiver
    start = time.process_time()
    res=FastHQChart.Run(jsConfig,hqData,proSuccess=result.RunSuccess, procFailed=result.RunFailed)
    elapsed = (time.process_time() - start)
    log='''RunSingleStockIndex()
---------------------------------------------------------------
elapsed: {0}s,
symbol count: {1},
script:
{2}
succeeded: {3}
---------------------------------------------------------------'''.format(elapsed,len(runConfig['Symbol']), runConfig["Script"], res)
    print(log)
    if (res==True):
        for item in result.dfResult.items() :
            symbol= item[0]
            print('{0} data:'.format(symbol))
            print(item[1])
if __name__ == '__main__':
if (TushareConfig.HQCHART_AUTHORIZATION_KEY==None) :
        # request a trial account; replace the mac address with your own machine's
        TushareConfig.HQCHART_AUTHORIZATION_KEY=FastHQChart.GetTrialAuthorize(mac="A4-B1-C1-4B-4D-7B")
    FastHQChart.Initialization(TushareConfig.HQCHART_AUTHORIZATION_KEY)    # initialize the HQChartPy plugin
    runConfig={
        # indicator script
        "Script":'''
        MO:MACD.MACD#MONTH,COLORYELLOW;
        WE:MACD.MACD#WEEK,COLORMAGENTA,LINETHICK1;
        DA2:MACD.MACD#MULTIDAY,COLORWHITE,LINETHICK1;
        DA:MACD.MACD#DAY,COLORGREEN;
        ''',
        # script arguments
        "Args": [ { "Name":"M1", "Value":15 }, { "Name":"M2", "Value":20 }, { "Name":"M3", "Value":30} ],
        # period / price adjustment
        "Period":0,    # period: 0=daily 1=weekly 2=monthly 3=yearly 9=quarterly
        "Right":0,     # adjustment: 0=none 1=forward-adjusted 2=backward-adjusted
        "Symbol":["000001.sz","600000.sh"],
        "OutCount":100,    # output the most recent 100 bars
        # jobID (optional)
        "JobID":str(uuid.uuid1())
    }
    # test stock indicator calculation
RunIndexTest(runConfig)
|
3aaac58ab339aa65c57a500455b92da9cc8ba6f0
|
a00ceae7fc625dc8a76341f32c76569090ca8489
|
/examples/low_level/login_with_auth.py
|
c11aa9f2cb48d0e08b58f8f741acf64ec4aa9e59
|
[
"MIT"
] |
permissive
|
pycontribs/jenkinsapi
|
23be879b284437f1b061dede69d782634da3c171
|
44942789b912de3d4952cc3c517562e92259a409
|
refs/heads/master
| 2023-08-28T16:43:22.641088
| 2023-06-16T09:18:19
| 2023-08-19T03:18:18
| 3,111,557
| 606
| 335
|
MIT
| 2023-08-19T03:18:20
| 2012-01-05T16:43:13
|
Python
|
UTF-8
|
Python
| false
| false
| 245
|
py
|
login_with_auth.py
|
"""
A lower-level example of how to log in with authentication
"""
from __future__ import print_function
from jenkinsapi import jenkins
J = jenkins.Jenkins("http://localhost:8080", username="sal", password="foobar")
J.poll()
print(J.items())
|
ecb83fa2a24f223c29e6cb3ed024bec5675935c6
|
532e3f4530353fdf9b6befb8a88c1dd450f42c36
|
/query_freshness.py
|
8dca2fd511817a6d033b02b61c5a5b5de8a3139f
|
[
"BSD-3-Clause"
] |
permissive
|
DataDog/Miscellany
|
9aad31b7ac2d635c1a9dd9af8f4ab927c9914e3a
|
b55849ab0b2ec95642d4082a834218b32276884e
|
refs/heads/master
| 2023-08-10T20:20:41.437060
| 2022-09-23T06:40:40
| 2022-09-23T06:40:40
| 48,389,989
| 204
| 149
|
BSD-3-Clause
| 2023-07-21T13:35:55
| 2015-12-21T19:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
query_freshness.py
|
#########################################################################
# Use a cron job to submit the metric regularly
#
# crontab -e
# (vim) */1 * * * * cd <path_to_the_script> ; python query_freshness.py
# (vim) :wq
# crontab -l
#########################################################################
from datadog import initialize, api
import time
options = {
'api_key': '149***e4',
'app_key': 'f2e***a5'
}
initialize(**options)
now = int(time.time())
query = 'system.cpu.system{*}' # Modify the metric you want to track the freshness of
metric = api.Metric.query(start=now - 3600, end=now, query=query) # how far back in time you want to go (the default is one hour)
nb_points = len(metric['series'][0]['pointlist']) - 1
last_submission = int(metric['series'][0]['pointlist'][nb_points][0]) / 1000
freshness = now - last_submission
print "freshness is: ", freshness
api.Metric.send(metric='freshness', points=freshness, tags=["metric:" + str(query)])
|
cfae5aaa39347e3afae3a3677dac804417e18929
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Validation/RecoHI/test/step2_RAW2DIGI_RECO_VALIDATION.py
|
96c7aaeaed22b4dad06089c925c99a36941413d1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
step2_RAW2DIGI_RECO_VALIDATION.py
|
# Auto generated configuration file
# using:
# Revision: 1.173
# Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: step2 -s RAW2DIGI,RECO,VALIDATION -n 2 --filein file:step1_HLT.root --eventcontent RECOSIM --conditions MC_37Y_V0::All --scenario HeavyIons --himix --mc --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('RECO2')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.StandardSequences.HiEventMixing_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.ValidationHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:2'),
name = cms.untracked.string('PyReleaseValidation')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.options = cms.untracked.PSet(
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:step1_HLT.root')
)
# Output definition
process.output = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('step2_RAW2DIGI_RECO_VALIDATION.root'),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string(''),
filterName = cms.untracked.string('')
)
)
# Additional output definition
# Other statements
process.mix.playback = True
process.GlobalTag.globaltag = 'MC_37Y_V0::All'
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.reconstruction_step = cms.Path(process.reconstructionHeavyIons)
process.validation_step = cms.Path(process.validationHeavyIons)
process.endjob_step = cms.Path(process.endOfProcess)
process.out_step = cms.EndPath(process.output)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.reconstruction_step,process.validation_step,process.endjob_step,process.out_step)
|
f760f3bdf68bed0862c4a586d78be0f11e67a2f1
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/caffe2/python/models/__sym_init__.py
|
79f045879ebcd445282402b560e651e0a289c283
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
__sym_init__.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from caffe2.proto import caffe2_pb2
def _parseFile(filename):
out_net = caffe2_pb2.NetDef()
# TODO(bwasti): A more robust handler for pathnames.
dir_path = os.path.dirname(__file__)
with open('{dir_path}/{filename}'.format(dir_path=dir_path,
filename=filename), 'rb') as f:
out_net.ParseFromString(f.read())
return out_net
init_net = _parseFile('init_net.pb')
predict_net = _parseFile('predict_net.pb')
|
63305545ad7f1d1d3c797317cad5e910a8d49e9d
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/MissionsPremiumViewMeta.py
|
1199ef1cfbe5aa394c443abd645494a2289f1968
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
MissionsPremiumViewMeta.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/MissionsPremiumViewMeta.py
from gui.Scaleform.daapi.view.meta.MissionsViewBaseMeta import MissionsViewBaseMeta
class MissionsPremiumViewMeta(MissionsViewBaseMeta):
pass
|
482149c3df0a65bb8eb5f7181744e79c082227e1
|
5fa348b53191cc8d8cb8df2d213d1a0d1a07a820
|
/hls4ml/utils/plot.py
|
e3424bb1ad53bacfdafa38a8ac947d159f94d2ea
|
[
"Apache-2.0"
] |
permissive
|
fastmachinelearning/hls4ml
|
29496536ca5a63a51e009d246019680bd4950007
|
4b4b5a0cf0bc59801e94b2bb64d63d8734645c3b
|
refs/heads/main
| 2023-08-31T15:52:18.564650
| 2023-08-28T22:57:50
| 2023-08-28T23:25:36
| 108,329,371
| 562
| 199
|
Apache-2.0
| 2023-09-13T17:19:05
| 2017-10-25T21:43:56
|
C++
|
UTF-8
|
Python
| false
| false
| 8,204
|
py
|
plot.py
|
# Heavily inspired by Keras's plot_model
"""Utilities related to model visualization."""
import os
import sys
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
"""Returns True if PyDot and Graphviz are available."""
if pydot is None:
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except OSError:
return False
def add_edge(dot, src, dst):
if not dot.get_edge(src, dst):
dot.add_edge(pydot.Edge(src, dst))
def model_to_dot(
model, show_shapes=False, show_layer_names=True, show_precision=False, rankdir='TB', dpi=96, subgraph=False
):
"""Convert a HLS model to dot format.
Arguments:
model: A HLS model instance.
show_shapes: whether to display shape information.
show_layer_names: whether to display layer names.
show_precision: whether to display precision of layer's variables.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
dpi: Dots per inch.
subgraph: whether to return a `pydot.Cluster` instance.
Returns:
A `pydot.Dot` instance representing the HLS model or
a `pydot.Cluster` instance representing nested model if
`subgraph=True`.
Raises:
ImportError: if graphviz or pydot are not available.
"""
if not check_pydot():
if 'IPython.core.magics.namespace' in sys.modules:
# We don't raise an exception here in order to avoid crashing notebook
# tests where graphviz is not available.
print('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.')
return
else:
raise ImportError('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.')
if subgraph:
dot = pydot.Cluster(style='dashed', graph_name=model.name)
dot.set('label', model.name)
dot.set('labeljust', 'l')
else:
dot = pydot.Dot()
dot.set('rankdir', rankdir)
dot.set('concentrate', True)
dot.set('dpi', dpi)
dot.set_node_defaults(shape='record')
layers = model.get_layers()
# Create graph nodes.
for i, layer in enumerate(layers):
# layer_id = str(id(layer))
layer_id = str(layer.index)
# Append a wrapped layer's label to node's label, if it exists.
layer_name = layer.name
class_name = layer.class_name
# Create node's label.
if show_layer_names:
# label = '{}: {}'.format(class_name, layer_name)
# label = '{}\\l{}\\l'.format(class_name, layer_name)
label = f'<b>{class_name}</b><br align="left" />{layer_name}'
else:
label = class_name
# Rebuild the label as a table including input/output shapes.
if show_shapes:
def format_shape(shape):
return str(tuple(shape)).replace(str(None), '?')
input_labels = '?'
try:
output_labels = format_shape(layer.get_output_variable().shape)
except AttributeError:
output_labels = '?'
if class_name != 'Input':
if len(layer.inputs) > 1:
input_shapes = []
for i in layer.inputs:
input_layer = layer.get_input_variable(i)
if input_layer is not None:
input_shapes.append(input_layer.shape)
else:
input_shapes.append('?')
formatted_shapes = [format_shape(ishape) for ishape in input_shapes]
input_labels = ', '.join(formatted_shapes)
else:
input_layer = layer.get_input_variable()
if input_layer is not None:
input_labels = format_shape(input_layer.shape)
label = f'{label}\n|{{input: {input_labels}|output: {output_labels}}}'
# Rebuild the label as a table including tensor precision.
if show_precision:
def format_precision(precision):
                return str(precision).replace('<', '<').replace('>', '>')
precision_labels = []
tensors = {}
tensors.update(layer.weights)
if len(layer.variables) == 1:
# A bit cleaner output
tensors['output'] = layer.get_output_variable()
else:
tensors.update(layer.variables)
for tensor_name, var in tensors.items():
if show_shapes:
# tensor_label = '{} {}: {}'.format(tensor_name,
tensor_label = '<tr><td align="left">{} {}:</td><td align="left">{}</td></tr>'.format(
tensor_name, format_shape(var.shape), format_precision(var.type.precision)
)
else:
# tensor_label = '{}: {}'.format(tensor_name,
tensor_label = '<tr><td align="left">{}:</td><td align="left">{}</td></tr>'.format(
tensor_name, format_precision(var.type.precision)
)
precision_labels.append(tensor_label)
# precision_label = '<br align="left" />'.join(precision_labels)
precision_label = ''.join(precision_labels)
precision_label = '<table border="0" cellspacing="0">' + precision_label + '</table>'
label = f'{label}|{{{precision_label}}}'
label = '<' + label + '>'
node = pydot.Node(layer_id, label=label)
dot.add_node(node)
# Connect nodes with edges.
for layer in layers:
layer_id = str(layer.index)
for input_name in layer.inputs:
input_layer = layer.get_input_node(input_name)
if input_layer is not None:
input_layer_id = str(input_layer.index)
add_edge(dot, input_layer_id, layer_id)
return dot
def plot_model(
model, to_file='model.png', show_shapes=False, show_layer_names=True, show_precision=False, rankdir='TB', dpi=96
):
"""Converts a HLS model to dot format and save to a file.
Arguments:
model: A HLS model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_layer_names: whether to display layer names.
show_precision: whether to display precision of layer's variables.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
dpi: Dots per inch.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_layer_names=show_layer_names,
show_precision=show_precision,
rankdir=rankdir,
dpi=dpi,
)
if dot is None:
return
if to_file is not None:
_, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
else:
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
try:
import tempfile
from IPython import display
temp = tempfile.NamedTemporaryFile(suffix='.png')
dot.write(temp.name, format='png')
return display.Image(filename=temp.name)
except ImportError:
pass
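# Hedged usage sketch (not part of the original module). Assumes an HLS model
# instance obtained elsewhere from hls4ml's converters; the variable name is
# illustrative:
#
#     dot = model_to_dot(hls_model, show_shapes=True, show_precision=True)
#     plot_model(hls_model, to_file='hls_model.png', show_shapes=True, rankdir='LR')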
|
e496a6013e2a0cc0cd6ba3f344f5d669f1b070ff
|
d8aabbc108b074817cb05eba4acff68d4f5c2d6c
|
/tensorwatch/saliency/deeplift.py
|
f8e208550933cea4c2a8c36fcad0927c272d4083
|
[
"MIT",
"BSD-2-Clause",
"LGPL-2.1-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
microsoft/tensorwatch
|
e5e868795bd1536f9f2e3cb56b34a97a82e6704e
|
f59730dc7a8735232ef417685800652372c3b5dd
|
refs/heads/master
| 2023-06-29T21:52:27.900779
| 2023-06-12T18:21:59
| 2023-06-12T18:21:59
| 186,783,422
| 3,626
| 394
|
MIT
| 2023-08-30T06:59:14
| 2019-05-15T08:29:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,713
|
py
|
deeplift.py
|
from .backprop import GradxInputExplainer
import types
import torch.nn.functional as F
from torch.autograd import Variable
# Based on formulation in DeepExplain, https://arxiv.org/abs/1711.06104
# https://github.com/marcoancona/DeepExplain/blob/master/deepexplain/tensorflow/methods.py#L221-L272
class DeepLIFTRescaleExplainer(GradxInputExplainer):
def __init__(self, model):
super(DeepLIFTRescaleExplainer, self).__init__(model)
self._prepare_reference()
self.baseline_inp = None
self._override_backward()
def _prepare_reference(self):
def init_refs(m):
name = m.__class__.__name__
if name.find('ReLU') != -1:
m.ref_inp_list = []
m.ref_out_list = []
def ref_forward(self, x):
self.ref_inp_list.append(x.data.clone())
out = F.relu(x)
self.ref_out_list.append(out.data.clone())
return out
def ref_replace(m):
name = m.__class__.__name__
if name.find('ReLU') != -1:
m.forward = types.MethodType(ref_forward, m)
self.model.apply(init_refs)
self.model.apply(ref_replace)
def _reset_preference(self):
def reset_refs(m):
name = m.__class__.__name__
if name.find('ReLU') != -1:
m.ref_inp_list = []
m.ref_out_list = []
self.model.apply(reset_refs)
def _baseline_forward(self, inp):
if self.baseline_inp is None:
self.baseline_inp = inp.data.clone()
self.baseline_inp.fill_(0.0)
self.baseline_inp = Variable(self.baseline_inp)
else:
self.baseline_inp.fill_(0.0)
# get ref
_ = self.model(self.baseline_inp)
def _override_backward(self):
def new_backward(self, grad_out):
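# DeepLIFT "Rescale" rule (explanatory comment added): where the input
# moved noticeably from the reference (|delta_in| > 1e-5) the incoming
# gradient is rescaled by delta_out / delta_in; near-reference units
# instead get 0.5 * grad_out, gated on (ref_inp + inp) > 0.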
ref_inp, inp = self.ref_inp_list
ref_out, out = self.ref_out_list
delta_out = out - ref_out
delta_in = inp - ref_inp
g1 = (delta_in.abs() > 1e-5).float() * grad_out * \
delta_out / delta_in
mask = ((ref_inp + inp) > 0).float()
g2 = (delta_in.abs() <= 1e-5).float() * 0.5 * mask * grad_out
return g1 + g2
def backward_replace(m):
name = m.__class__.__name__
if name.find('ReLU') != -1:
m.backward = types.MethodType(new_backward, m)
self.model.apply(backward_replace)
def explain(self, inp, ind=None, raw_inp=None):
self._reset_preference()
self._baseline_forward(inp)
g = super(DeepLIFTRescaleExplainer, self).explain(inp, ind)
return g
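# Hedged usage sketch (added; not part of the original file). The model and
# input below are assumptions; any ReLU-based classifier should work:
#
#   import torch, torchvision.models as models
#   model = models.vgg16(pretrained=True).eval()
#   inp = torch.randn(1, 3, 224, 224, requires_grad=True)
#   explainer = DeepLIFTRescaleExplainer(model)
#   saliency = explainer.explain(inp, ind=243)  # 243: an assumed class index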
|
fc2ee86d9a38b0a5a44fde05a276bf29d7bb63de
|
b71a6e7050b0a4368007350d91ee078288a7318c
|
/examples/1_tree.py
|
937d410963c8ec297194da5252ab5abe8b933597
|
[
"Apache-2.0"
] |
permissive
|
jarvisteach/appJar
|
2dfd0da6cb85ea3535379ed000efd97fb42fe4f8
|
0b59ce041da2197dcff3410e20f298676f1f7266
|
refs/heads/appJar
| 2023-08-29T09:42:01.812005
| 2019-09-28T18:34:06
| 2019-09-28T18:34:06
| 39,996,518
| 696
| 103
|
NOASSERTION
| 2023-02-20T01:01:16
| 2015-07-31T08:59:20
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
1_tree.py
|
import sys
sys.path.append("../")
from appJar import gui
def press(btn): print(btn)
def press2(btn): print("EDIT", btn)
app=gui()
data="<people><person><name>Richard</name><age>21</age></person><person><name>kh</name><age>44</age></person></people>"
app.addTree("tree", data)
app.setTreeDoubleClickFunction("tree", press)
app.setTreeEditFunction("tree", press2)
app.go()
|
32b3fb2fa1b74fa2ccb36c9b553574e0f7125450
|
368960cc5dc09284dff33129b2b8c31773705b81
|
/src/icalendar/cal.py
|
35a288bfffe8f490caabc473a9bd25aed3c3f9f4
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
collective/icalendar
|
802f70ecaa325cc5a8ce65e0c41bfcbdccf0a97b
|
8fb3353408df94d54b74a7e05586fd6c99eed7ef
|
refs/heads/master
| 2023-09-01T08:50:59.574456
| 2023-08-31T06:05:59
| 2023-08-31T06:05:59
| 2,222,138
| 716
| 166
|
NOASSERTION
| 2023-09-06T17:54:06
| 2011-08-17T14:16:35
|
Python
|
UTF-8
|
Python
| false
| false
| 26,597
|
py
|
cal.py
|
"""Calendar is a dictionary like Python object that can render itself as VCAL
files according to rfc2445.
These are the defined components.
"""
from datetime import datetime, timedelta
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Contentline
from icalendar.parser import Contentlines
from icalendar.parser import Parameters
from icalendar.parser import q_join
from icalendar.parser import q_split
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.prop import TypesFactory
from icalendar.prop import vText, vDDDLists
from icalendar.timezone_cache import _timezone_cache
import pytz
import dateutil.rrule, dateutil.tz
from pytz.tzinfo import DstTzInfo
######################################
# The component factory
class ComponentFactory(CaselessDict):
"""All components defined in rfc 2445 are registered in this factory class.
To get a component you can use it like this.
"""
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super().__init__(*args, **kwargs)
self['VEVENT'] = Event
self['VTODO'] = Todo
self['VJOURNAL'] = Journal
self['VFREEBUSY'] = FreeBusy
self['VTIMEZONE'] = Timezone
self['STANDARD'] = TimezoneStandard
self['DAYLIGHT'] = TimezoneDaylight
self['VALARM'] = Alarm
self['VCALENDAR'] = Calendar
# These properties have multiple property values inlined in one property
# line, separated by commas. Use CaselessDict as a simple caseless set.
INLINE = CaselessDict({
'CATEGORIES': 1,
'RESOURCES': 1,
'FREEBUSY': 1,
})
_marker = []
class Component(CaselessDict):
"""Component is the base object for calendar, Event and the other
components defined in RFC 2445. Normally you will not use this class
directly, but rather one of the subclasses.
"""
name = None # should be defined in each component
required = () # These properties are required
singletons = () # These properties must only appear once
multiple = () # may occur more than once
exclusive = () # These properties are mutually exclusive
inclusive = () # if any occurs the other(s) MUST occur
# ('duration', 'repeat')
ignore_exceptions = False # if True, and we cannot parse this
# component, we will silently ignore
# it, rather than let the exception
# propagate upwards
# not_compliant = [''] # List of non-compliant properties.
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super().__init__(*args, **kwargs)
# set parameters here for properties that use non-default values
self.subcomponents = [] # Components can be nested.
self.errors = [] # If we ignored exception(s) while
# parsing a property, contains error strings
# def is_compliant(self, name):
# """Returns True is the given property name is compliant with the
# icalendar implementation.
#
# If the parser is too strict it might prevent parsing erroneous but
# otherwise compliant properties. So the parser is pretty lax, but it is
# possible to test for non-compliance by calling this method.
# """
# return name in not_compliant
def __bool__(self):
"""Returns True, CaselessDict would return False if it had no items.
"""
return True
# python 2 compatibility
__nonzero__ = __bool__
def is_empty(self):
"""Returns True if Component has no items or subcomponents, else False.
"""
return not (list(self.values()) + self.subcomponents)
@property
def is_broken(self):
return bool(self.errors)
#############################
# handling of property values
def _encode(self, name, value, parameters=None, encode=1):
"""Encode values to icalendar property values.
:param name: Name of the property.
:type name: string
:param value: Value of the property. Either a basic Python type or
one of icalendar's own property types.
:type value: Python native type or icalendar property type.
:param parameters: Property parameter dictionary for the value. Only
used if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: icalendar property value
"""
if not encode:
return value
if isinstance(value, types_factory.all_types):
# Don't encode already encoded values.
return value
klass = types_factory.for_property(name)
obj = klass(value)
if parameters:
if isinstance(parameters, dict):
params = Parameters()
for key, item in parameters.items():
params[key] = item
parameters = params
assert isinstance(parameters, Parameters)
obj.params = parameters
return obj
def add(self, name, value, parameters=None, encode=1):
"""Add a property.
:param name: Name of the property.
:type name: string
:param value: Value of the property. Either a basic Python type or
one of icalendar's own property types.
:type value: Python native type or icalendar property type.
:param parameters: Property parameter dictionary for the value. Only
used if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: None
"""
if isinstance(value, datetime) and\
name.lower() in ('dtstamp', 'created', 'last-modified'):
# RFC expects UTC for those... force value conversion.
if getattr(value, 'tzinfo', False) and value.tzinfo is not None:
value = value.astimezone(pytz.utc)
else:
# assume UTC for naive datetime instances
value = pytz.utc.localize(value)
# encode value
if encode and isinstance(value, list) \
and name.lower() not in ['rdate', 'exdate', 'categories']:
# Individually convert each value to an ical type except rdate and
# exdate, where lists of dates might be passed to vDDDLists.
value = [self._encode(name, v, parameters, encode) for v in value]
else:
value = self._encode(name, value, parameters, encode)
# set value
if name in self:
# If property already exists, append it.
oldval = self[name]
if isinstance(oldval, list):
if isinstance(value, list):
value = oldval + value
else:
oldval.append(value)
value = oldval
else:
value = [oldval, value]
self[name] = value
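# Hedged example (added): per the docstring above, native values are
# encoded to icalendar property types and repeated names accumulate
# into a list; the addresses are placeholders:
#
#   comp = Component()
#   comp.add('attendee', 'MAILTO:alice@example.com')
#   comp.add('attendee', 'MAILTO:bob@example.com')
#   comp['ATTENDEE']  # -> list with two encoded values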
def _decode(self, name, value):
"""Internal for decoding property values.
"""
# TODO: Currently the decoded method calls the icalendar.prop instances
# from_ical. We probably want to decode properties into Python native
# types here. But when parsing from an ical string with from_ical, we
# want to encode the string into a real icalendar.prop property.
if isinstance(value, vDDDLists):
# TODO: Workaround unfinished decoding
return value
decoded = types_factory.from_ical(name, value)
# TODO: remove when proper decoded is implemented in every prop.* class
# Workaround to decode vText properly
if isinstance(decoded, vText):
decoded = decoded.encode(DEFAULT_ENCODING)
return decoded
def decoded(self, name, default=_marker):
"""Returns decoded value of property.
"""
# XXX: fail. what's this function supposed to do in the end?
# -rnix
if name in self:
value = self[name]
if isinstance(value, list):
return [self._decode(name, v) for v in value]
return self._decode(name, value)
else:
if default is _marker:
raise KeyError(name)
else:
return default
########################################################################
# Inline values. A few properties have multiple values inlined in one
# property line. These methods are used for splitting and joining them.
def get_inline(self, name, decode=1):
"""Returns a list of values (split on comma).
"""
vals = [v.strip('" ') for v in q_split(self[name])]
if decode:
return [self._decode(name, val) for val in vals]
return vals
def set_inline(self, name, values, encode=1):
"""Converts a list of values into comma separated string and sets value
to that.
"""
if encode:
values = [self._encode(name, value, encode=1) for value in values]
self[name] = types_factory['inline'](q_join(values))
#########################
# Handling of components
def add_component(self, component):
"""Add a subcomponent to this component.
"""
self.subcomponents.append(component)
def _walk(self, name):
"""Walk to given component.
"""
result = []
if name is None or self.name == name:
result.append(self)
for subcomponent in self.subcomponents:
result += subcomponent._walk(name)
return result
def walk(self, name=None):
"""Recursively traverses component and subcomponents. Returns sequence
of same. If name is passed, only components with name will be returned.
"""
if name is not None:
name = name.upper()
return self._walk(name)
#####################
# Generation
def property_items(self, recursive=True, sorted=True):
"""Returns properties in this component and subcomponents as:
[(name, value), ...]
"""
vText = types_factory['text']
properties = [('BEGIN', vText(self.name).to_ical())]
if sorted:
property_names = self.sorted_keys()
else:
property_names = self.keys()
for name in property_names:
values = self[name]
if isinstance(values, list):
# normally one property is one line
for value in values:
properties.append((name, value))
else:
properties.append((name, values))
if recursive:
# recursion is fun!
for subcomponent in self.subcomponents:
properties += subcomponent.property_items(sorted=sorted)
properties.append(('END', vText(self.name).to_ical()))
return properties
@classmethod
def from_ical(cls, st, multiple=False):
"""Populates the component recursively from a string.
"""
stack = [] # a stack of components
comps = []
for line in Contentlines.from_ical(st): # raw parsing
if not line:
continue
try:
name, params, vals = line.parts()
except ValueError as e:
# if unable to parse a line within a component
# that ignores exceptions, mark the component
# as broken and skip the line. otherwise raise.
component = stack[-1] if stack else None
if not component or not component.ignore_exceptions:
raise
component.errors.append((None, str(e)))
continue
uname = name.upper()
# check for start of component
if uname == 'BEGIN':
# try and create one of the components defined in the spec,
# otherwise get a general Components for robustness.
c_name = vals.upper()
c_class = component_factory.get(c_name, Component)
# If component factory cannot resolve ``c_name``, the generic
# ``Component`` class is used which does not have the name set.
# That's opposed to the usage of ``cls``, which represents a
# more concrete subclass with a name set (e.g. VCALENDAR).
component = c_class()
if not getattr(component, 'name', ''): # undefined components
component.name = c_name
stack.append(component)
# check for end of event
elif uname == 'END':
# we are done adding properties to this component
# so pop it from the stack and add it to the new top.
component = stack.pop()
if not stack: # we are at the end
comps.append(component)
else:
stack[-1].add_component(component)
if vals == 'VTIMEZONE' and \
'TZID' in component and \
component['TZID'] not in pytz.all_timezones and \
component['TZID'] not in _timezone_cache:
_timezone_cache[component['TZID']] = component.to_tz()
# we are adding properties to the current top of the stack
else:
factory = types_factory.for_property(name)
component = stack[-1] if stack else None
if not component:
raise ValueError(f'Property "{name}" does not have a parent component.')
datetime_names = ('DTSTART', 'DTEND', 'RECURRENCE-ID', 'DUE',
'FREEBUSY', 'RDATE', 'EXDATE')
try:
if name in datetime_names and 'TZID' in params:
vals = factory(factory.from_ical(vals, params['TZID']))
else:
vals = factory(factory.from_ical(vals))
except ValueError as e:
if not component.ignore_exceptions:
raise
component.errors.append((uname, str(e)))
else:
vals.params = params
component.add(name, vals, encode=0)
if multiple:
return comps
if len(comps) > 1:
raise ValueError(cls._format_error(
'Found multiple components where only one is allowed', st))
if len(comps) < 1:
raise ValueError(cls._format_error(
'Found no components where exactly one is required', st))
return comps[0]
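# Hedged example (added): from_ical() is the inverse of to_ical(), e.g.:
#
#   cal = Component.from_ical(
#       'BEGIN:VCALENDAR\r\nPRODID:-//X//X//EN\r\nVERSION:2.0\r\nEND:VCALENDAR\r\n')
#   cal.name       # 'VCALENDAR'
#   cal['PRODID']  # vText('-//X//X//EN')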
@staticmethod
def _format_error(error_description, bad_input, ellipsis='[...]'):
# there are three more characters in the error, i.e. ' ' x2 and a ':'
max_error_length = 100 - 3
if len(error_description) + len(bad_input) + len(ellipsis) > max_error_length:
truncate_to = max_error_length - len(error_description) - len(ellipsis)
return f'{error_description}: {bad_input[:truncate_to]} {ellipsis}'
else:
return f'{error_description}: {bad_input}'
def content_line(self, name, value, sorted=True):
"""Returns property as content line.
"""
params = getattr(value, 'params', Parameters())
return Contentline.from_parts(name, params, value, sorted=sorted)
def content_lines(self, sorted=True):
"""Converts the Component and subcomponents into content lines.
"""
contentlines = Contentlines()
for name, value in self.property_items(sorted=sorted):
cl = self.content_line(name, value, sorted=sorted)
contentlines.append(cl)
contentlines.append('') # remember the empty string in the end
return contentlines
def to_ical(self, sorted=True):
'''
:param sorted: Whether parameters and properties should be
lexicographically sorted.
'''
content_lines = self.content_lines(sorted=sorted)
return content_lines.to_ical()
def __repr__(self):
"""String representation of class with all of it's subcomponents.
"""
subs = ', '.join(str(it) for it in self.subcomponents)
return f"{self.name or type(self).__name__}({dict(self)}{', ' + subs if subs else ''})"
#######################################
# components defined in RFC 5545
class Event(Component):
name = 'VEVENT'
canonical_order = (
'SUMMARY', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP',
'UID', 'RECURRENCE-ID', 'SEQUENCE', 'RRULE', 'RDATE',
'EXDATE',
)
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'GEO', 'LAST-MODIFIED',
'LOCATION', 'ORGANIZER', 'PRIORITY', 'DTSTAMP', 'SEQUENCE', 'STATUS',
'SUMMARY', 'TRANSP', 'URL', 'RECURRENCE-ID', 'DTEND', 'DURATION',
'UID', 'CATEGORIES',
)
exclusive = ('DTEND', 'DURATION',)
multiple = (
'ATTACH', 'ATTENDEE', 'COMMENT', 'CONTACT', 'EXDATE',
'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
ignore_exceptions = True
class Todo(Component):
name = 'VTODO'
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'COMPLETED', 'CREATED', 'DESCRIPTION', 'DTSTAMP', 'DTSTART',
'GEO', 'LAST-MODIFIED', 'LOCATION', 'ORGANIZER', 'PERCENT-COMPLETE',
'PRIORITY', 'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID',
'URL', 'DUE', 'DURATION',
)
exclusive = ('DUE', 'DURATION',)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
class Journal(Component):
name = 'VJOURNAL'
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'CREATED', 'DTSTART', 'DTSTAMP', 'LAST-MODIFIED', 'ORGANIZER',
'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID', 'URL',
)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'RELATED', 'RDATE', 'RRULE', 'RSTATUS', 'DESCRIPTION',
)
class FreeBusy(Component):
name = 'VFREEBUSY'
required = ('UID', 'DTSTAMP',)
singletons = (
'CONTACT', 'DTSTART', 'DTEND', 'DTSTAMP', 'ORGANIZER',
'UID', 'URL',
)
multiple = ('ATTENDEE', 'COMMENT', 'FREEBUSY', 'RSTATUS',)
class Timezone(Component):
name = 'VTIMEZONE'
canonical_order = ('TZID',)
required = ('TZID',) # it also requires one of components DAYLIGHT and STANDARD
singletons = ('TZID', 'LAST-MODIFIED', 'TZURL',)
@staticmethod
def _extract_offsets(component, tzname):
"""extract offsets and transition times from a VTIMEZONE component
:param component: a STANDARD or DAYLIGHT component
:param tzname: the name of the zone
"""
offsetfrom = component['TZOFFSETFROM'].td
offsetto = component['TZOFFSETTO'].td
dtstart = component['DTSTART'].dt
# offsets need to be rounded to the nearest minute; we might lose up
# to 30 seconds of accuracy, but it can't be helped (datetime
# supposedly cannot handle smaller offsets)
offsetto_s = int((offsetto.seconds + 30) / 60) * 60
offsetto = timedelta(days=offsetto.days, seconds=offsetto_s)
offsetfrom_s = int((offsetfrom.seconds + 30) / 60) * 60
offsetfrom = timedelta(days=offsetfrom.days, seconds=offsetfrom_s)
# expand recurrences
if 'RRULE' in component:
# to be paranoid about correct weekdays
# evaluate the rrule with the current offset
tzi = dateutil.tz.tzoffset("(offsetfrom)", offsetfrom)
rrstart = dtstart.replace(tzinfo=tzi)
rrulestr = component['RRULE'].to_ical().decode('utf-8')
rrule = dateutil.rrule.rrulestr(rrulestr, dtstart=rrstart)
if not {'UNTIL', 'COUNT'}.intersection(component['RRULE'].keys()):
# pytz.timezones don't know any transition dates after 2038
# either
rrule._until = datetime(2038, 12, 31, tzinfo=pytz.UTC)
# constructing the pytz-timezone requires UTC transition times.
# here we construct local times without tzinfo, the offset to UTC
# gets subtracted in to_tz().
transtimes = [dt.replace(tzinfo=None) for dt in rrule]
# or rdates
elif 'RDATE' in component:
if not isinstance(component['RDATE'], list):
rdates = [component['RDATE']]
else:
rdates = component['RDATE']
transtimes = [dtstart] + [leaf.dt for tree in rdates for
leaf in tree.dts]
else:
transtimes = [dtstart]
transitions = [(transtime, offsetfrom, offsetto, tzname) for
transtime in set(transtimes)]
if component.name == 'STANDARD':
is_dst = 0
elif component.name == 'DAYLIGHT':
is_dst = 1
return is_dst, transitions
@staticmethod
def _make_unique_tzname(tzname, tznames):
"""
:param tzname: Candidate tzname
:param tznames: Other tznames
"""
# TODO better way of making sure tznames are unique
while tzname in tznames:
tzname += '_1'
tznames.add(tzname)
return tzname
def to_tz(self):
"""convert this VTIMEZONE component to a pytz.timezone object
"""
try:
zone = str(self['TZID'])
except UnicodeEncodeError:
zone = self['TZID'].encode('ascii', 'replace')
transitions = []
dst = {}
tznames = set()
for component in self.walk():
if type(component) == Timezone:
continue
assert isinstance(component['DTSTART'].dt, datetime), (
"VTIMEZONEs sub-components' DTSTART must be of type datetime, not date"
)
try:
tzname = str(component['TZNAME'])
except UnicodeEncodeError:
tzname = component['TZNAME'].encode('ascii', 'replace')
tzname = self._make_unique_tzname(tzname, tznames)
except KeyError:
# for whatever reason this is str/unicode
tzname = f"{zone}_{component['DTSTART'].to_ical().decode('utf-8')}_" + \
f"{component['TZOFFSETFROM'].to_ical()}_" + \
f"{component['TZOFFSETTO'].to_ical()}"
tzname = self._make_unique_tzname(tzname, tznames)
dst[tzname], component_transitions = self._extract_offsets(
component, tzname
)
transitions.extend(component_transitions)
transitions.sort()
transition_times = [
transtime - osfrom for transtime, osfrom, _, _ in transitions
]
# transition_info is a list with tuples in the format
# (utcoffset, dstoffset, name)
# dstoffset = 0, if current transition is to standard time
# = this_utcoffset - prev_standard_utcoffset, otherwise
transition_info = []
for num, (transtime, osfrom, osto, name) in enumerate(transitions):
dst_offset = False
if not dst[name]:
dst_offset = timedelta(seconds=0)
else:
# go back in time until we find a transition to dst
for index in range(num - 1, -1, -1):
if not dst[transitions[index][3]]: # [3] is the name
dst_offset = osto - transitions[index][2] # [2] is osto # noqa
break
# when the first transition is to dst, we didn't find anything
# in the past, so we have to look into the future
if not dst_offset:
for index in range(num, len(transitions)):
if not dst[transitions[index][3]]: # [3] is the name
dst_offset = osto - transitions[index][2] # [2] is osto # noqa
break
assert dst_offset is not False
transition_info.append((osto, dst_offset, name))
cls = type(zone, (DstTzInfo,), {
'zone': zone,
'_utc_transition_times': transition_times,
'_transition_info': transition_info
})
return cls()
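# Hedged note (added): given a parsed VTIMEZONE component `tz` (an
# assumption here), `tz.to_tz()` returns an instance of a freshly built
# DstTzInfo subclass that can be used wherever a pytz timezone is expected.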
class TimezoneStandard(Component):
name = 'STANDARD'
required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM',)
multiple = ('COMMENT', 'RDATE', 'TZNAME', 'RRULE', 'EXDATE')
class TimezoneDaylight(Component):
name = 'DAYLIGHT'
required = TimezoneStandard.required
singletons = TimezoneStandard.singletons
multiple = TimezoneStandard.multiple
class Alarm(Component):
name = 'VALARM'
# some properties MAY/MUST/MUST NOT appear depending on ACTION value
required = ('ACTION', 'TRIGGER',)
singletons = (
'ATTACH', 'ACTION', 'DESCRIPTION', 'SUMMARY', 'TRIGGER',
'DURATION', 'REPEAT',
)
inclusive = (('DURATION', 'REPEAT',), ('SUMMARY', 'ATTENDEE',))
multiple = ('ATTENDEE', 'ATTACH')
class Calendar(Component):
"""This is the base object for an iCalendar file.
"""
name = 'VCALENDAR'
canonical_order = ('VERSION', 'PRODID', 'CALSCALE', 'METHOD',)
required = ('PRODID', 'VERSION', )
singletons = ('PRODID', 'VERSION', 'CALSCALE', 'METHOD')
# These are read only singleton, so one instance is enough for the module
types_factory = TypesFactory()
component_factory = ComponentFactory()
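if __name__ == '__main__':
    # Hedged usage sketch (added; not part of the original module): build a
    # minimal calendar and round-trip it; all values below are placeholders.
    cal = Calendar()
    cal.add('prodid', '-//Example//Example//EN')
    cal.add('version', '2.0')
    event = Event()
    event.add('uid', 'example-uid@example.com')
    event.add('dtstamp', datetime(2021, 1, 1, 12, 0, 0, tzinfo=pytz.utc))
    event.add('summary', 'An example event')
    cal.add_component(event)
    parsed = Calendar.from_ical(cal.to_ical().decode('utf-8'))
    print(parsed.walk('VEVENT')[0]['SUMMARY'])  # 'An example event'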
|
47578e8c9a4c34c9af096881f360db701ba24c9d
|
576764ad37667f8da2c63aaa1a9f96da211795a6
|
/tests/forte/data/datapack_type_infer_test.py
|
c2949f896adaa2c4e31102b30bbfbea84c815208
|
[
"Apache-2.0"
] |
permissive
|
asyml/forte
|
96f852601647836dda3bccf3bd7900b9d10e6fcb
|
13e50aebe2afd79a7a8b3c01f0bb2568addea54f
|
refs/heads/master
| 2023-04-09T17:52:31.203644
| 2023-04-06T15:04:49
| 2023-04-06T15:04:49
| 201,518,876
| 233
| 73
|
Apache-2.0
| 2023-04-06T15:04:51
| 2019-08-09T18:12:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
datapack_type_infer_test.py
|
import unittest
from ddt import data, ddt
from forte.data.caster import MultiPackBoxer, MultiPackUnboxer
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.readers.misc_readers import RawPackReader, RawMultiPackReader
from forte.data.readers.multipack_sentence_reader import MultiPackSentenceReader
from forte.data.readers.multipack_terminal_reader import MultiPackTerminalReader
from forte.data.readers.plaintext_reader import PlainTextReader
@ddt
class DataPackTypeInferTest(unittest.TestCase):
@data(
PlainTextReader,
RawPackReader,
)
def test_datapack_reader(self, component):
reader = component()
self.assertTrue(reader.pack_type() is DataPack)
@data(
MultiPackSentenceReader,
MultiPackTerminalReader,
RawMultiPackReader,
)
def test_multipack_reader(self, component):
reader = component()
self.assertTrue(reader.pack_type() is MultiPack)
@data(
MultiPackBoxer,
)
def test_multipack_boxer(self, component):
caster = component()
self.assertTrue(caster.input_pack_type() is DataPack)
self.assertTrue(caster.output_pack_type() is MultiPack)
@data(
MultiPackUnboxer,
)
def test_multipack_unboxer(self, component):
caster = component()
self.assertTrue(caster.input_pack_type() is MultiPack)
self.assertTrue(caster.output_pack_type() is DataPack)
|
e198cc386a775751d9de1fbdba4acf6f48e18287
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/tests/extensions/test_custom_objects_for_setting_attribute.py
|
48a4b0ea4962ea9e4f40a4dbedf65dcdb9f9a27a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,759
|
py
|
test_custom_objects_for_setting_attribute.py
|
import pytest
from strawberry.extensions.tracing.opentelemetry import OpenTelemetryExtension
@pytest.fixture
def otel_ext():
return OpenTelemetryExtension()
class SimpleObject:
def __init__(self, value):
self.value = value
def __str__(self):
return f"SimpleObject({self.value})"
class ComplexObject:
def __init__(self, simple_object, value):
self.simple_object = simple_object
self.value = value
def __str__(self):
return f"ComplexObject({self.simple_object!s}, {self.value})"
def test_convert_complex_number(otel_ext):
value = 3 + 4j
assert otel_ext.convert_to_allowed_types(value) == "(3+4j)"
def test_convert_range(otel_ext):
value = range(3)
assert otel_ext.convert_to_allowed_types(value) == "0, 1, 2"
def test_convert_bytearray(otel_ext):
value = bytearray(b"hello world")
assert otel_ext.convert_to_allowed_types(value) == b"hello world"
def test_convert_memoryview(otel_ext):
value = memoryview(b"hello world")
assert otel_ext.convert_to_allowed_types(value) == b"hello world"
def test_convert_set(otel_ext):
value = {1, 2, 3, 4}
converted_value = otel_ext.convert_to_allowed_types(value)
assert set(converted_value.strip("{}").split(", ")) == {"1", "2", "3", "4"}
def test_convert_frozenset(otel_ext):
value = frozenset([1, 2, 3, 4])
converted_value = otel_ext.convert_to_allowed_types(value)
assert set(converted_value.strip("{}").split(", ")) == {"1", "2", "3", "4"}
def test_convert_complex_object_with_simple_object(otel_ext):
simple_obj = SimpleObject(42)
complex_obj = ComplexObject(simple_obj, 99)
assert (
otel_ext.convert_to_allowed_types(complex_obj)
== "ComplexObject(SimpleObject(42), 99)"
)
def test_convert_dictionary(otel_ext):
value = {
"int": 1,
"float": 3.14,
"bool": True,
"str": "hello",
"list": [1, 2, 3],
"tuple": (4, 5, 6),
"simple_object": SimpleObject(42),
}
expected = (
"{int: 1, "
"float: 3.14, "
"bool: True, "
"str: hello, "
"list: 1, 2, 3, "
"tuple: 4, 5, 6, "
"simple_object: SimpleObject(42)}"
)
assert otel_ext.convert_to_allowed_types(value) == expected
def test_convert_bool(otel_ext):
assert otel_ext.convert_to_allowed_types(True) is True
assert otel_ext.convert_to_allowed_types(False) is False
def test_convert_str(otel_ext):
assert otel_ext.convert_to_allowed_types("hello") == "hello"
def test_convert_bytes(otel_ext):
assert otel_ext.convert_to_allowed_types(b"hello") == b"hello"
def test_convert_int(otel_ext):
assert otel_ext.convert_to_allowed_types(42) == 42
def test_convert_float(otel_ext):
assert otel_ext.convert_to_allowed_types(3.14) == 3.14
def test_convert_simple_object(otel_ext):
obj = SimpleObject(42)
assert otel_ext.convert_to_allowed_types(obj) == "SimpleObject(42)"
def test_convert_list_of_basic_types(otel_ext):
value = [1, "hello", 3.14, True, False]
assert otel_ext.convert_to_allowed_types(value) == "1, hello, 3.14, True, False"
def test_convert_list_of_mixed_types(otel_ext):
value = [1, "hello", 3.14, SimpleObject(42)]
assert (
otel_ext.convert_to_allowed_types(value) == "1, hello, 3.14, SimpleObject(42)"
)
def test_convert_tuple_of_basic_types(otel_ext):
value = (1, "hello", 3.14, True, False)
assert otel_ext.convert_to_allowed_types(value) == "1, hello, 3.14, True, False"
def test_convert_tuple_of_mixed_types(otel_ext):
value = (1, "hello", 3.14, SimpleObject(42))
assert (
otel_ext.convert_to_allowed_types(value) == "1, hello, 3.14, SimpleObject(42)"
)
|
707621419f6a581693712630fbf9cc4d983499ba
|
5a4ff4e0a07ac57eead862a21d33fd441d89f05b
|
/proximal/prox_fns/__init__.py
|
a3ba9858774b3ff4381e8e750e55aefa8dc54e9a
|
[
"MIT"
] |
permissive
|
comp-imaging/ProxImaL
|
758b25a41fb008a49236ed893b38b244968a0afa
|
3437814f70b45669aab5a44f7da02ee2da5751db
|
refs/heads/master
| 2023-08-18T15:22:10.030926
| 2023-07-23T17:26:31
| 2023-08-09T18:16:14
| 63,992,576
| 114
| 33
|
MIT
| 2023-09-13T21:42:35
| 2016-07-23T01:12:16
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
__init__.py
|
from .prox_fn import ProxFn
from .diff_fn import diff_fn
from .nonneg import nonneg, weighted_nonneg, masked_nonneg
from .norm1 import norm1, weighted_norm1
from .sum_squares import sum_squares, weighted_sum_squares, least_squares, lsqr_options, cg_options
from .group_norm1 import group_norm1, weighted_group_norm1
from .poisson_norm import poisson_norm, weighted_poisson_norm
from .patch_NLM import patch_NLM
from .sum_entries import sum_entries, zero_prox
# from .matlab_external import matlab_external
|
6cfb8f790c345904cb3c7679ab19a3cb6029dbba
|
19f76203bbd176fe5a5ff7a2470ada9fb9af7c39
|
/taskflow/types/graph.py
|
5d6912797ee9c67d6f626d229b4108c7b4753733
|
[
"Apache-2.0"
] |
permissive
|
openstack/taskflow
|
19f1c614f2fc175b6e57ac9280dc510e402c9f56
|
3b40c04594fb49ab17f8834f2f0df294a1f3e996
|
refs/heads/master
| 2023-09-04T23:38:07.154364
| 2023-08-16T13:57:37
| 2023-08-16T13:57:37
| 16,626,961
| 338
| 75
|
Apache-2.0
| 2018-10-29T14:30:27
| 2014-02-07T20:45:53
|
Python
|
UTF-8
|
Python
| false
| false
| 10,504
|
py
|
graph.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import io
import os
import networkx as nx
from networkx.drawing import nx_pydot
def _common_format(g, edge_notation):
lines = []
lines.append("Name: %s" % g.name)
lines.append("Type: %s" % type(g).__name__)
lines.append("Frozen: %s" % nx.is_frozen(g))
lines.append("Density: %0.3f" % nx.density(g))
lines.append("Nodes: %s" % g.number_of_nodes())
for n, n_data in g.nodes(data=True):
if n_data:
lines.append(" - %s (%s)" % (n, n_data))
else:
lines.append(" - %s" % n)
lines.append("Edges: %s" % g.number_of_edges())
for (u, v, e_data) in g.edges(data=True):
if e_data:
lines.append(" %s %s %s (%s)" % (u, edge_notation, v, e_data))
else:
lines.append(" %s %s %s" % (u, edge_notation, v))
return lines
class Graph(nx.Graph):
"""A graph subclass with useful utility functions."""
def __init__(self, incoming_graph_data=None, name=''):
super(Graph, self).__init__(incoming_graph_data=incoming_graph_data,
name=name)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx_pydot.to_pydot(self).to_string()
def pformat(self):
"""Pretty formats your graph into a string."""
return os.linesep.join(_common_format(self, "<->"))
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v."""
if attr_dict is not None:
return super(Graph, self).add_edge(u, v, **attr_dict)
return super(Graph, self).add_edge(u, v, **attr)
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes."""
if attr_dict is not None:
return super(Graph, self).add_node(n, **attr_dict)
return super(Graph, self).add_node(n, **attr)
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return Graph()
class DiGraph(nx.DiGraph):
"""A directed graph subclass with useful utility functions."""
def __init__(self, incoming_graph_data=None, name=''):
super(DiGraph, self).__init__(incoming_graph_data=incoming_graph_data,
name=name)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def get_edge_data(self, u, v, default=None):
"""Returns a *copy* of the edge attribute dictionary between (u, v).
NOTE(harlowja): this differs from the networkx get_edge_data() as that
function does not return a copy (but returns a reference to the actual
edge data).
"""
try:
return dict(self.adj[u][v])
except KeyError:
return default
def topological_sort(self):
"""Return a list of nodes in this graph in topological sort order."""
return nx.topological_sort(self)
def pformat(self):
"""Pretty formats your graph into a string.
This pretty formatted string representation includes many useful
details about your graph, including: name, type, frozenness, node count,
nodes, edge count, edges, graph density and graph cycles (if any).
"""
lines = _common_format(self, "->")
cycles = list(nx.cycles.recursive_simple_cycles(self))
lines.append("Cycles: %s" % len(cycles))
for cycle in cycles:
buf = io.StringIO()
buf.write("%s" % (cycle[0]))
for i in range(1, len(cycle)):
buf.write(" --> %s" % (cycle[i]))
buf.write(" --> %s" % (cycle[0]))
lines.append(" %s" % buf.getvalue())
return os.linesep.join(lines)
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx_pydot.to_pydot(self).to_string()
def is_directed_acyclic(self):
"""Returns if this graph is a DAG or not."""
return nx.is_directed_acyclic_graph(self)
def no_successors_iter(self):
"""Returns an iterator for all nodes with no successors."""
for n in self.nodes:
if not len(list(self.successors(n))):
yield n
def no_predecessors_iter(self):
"""Returns an iterator for all nodes with no predecessors."""
for n in self.nodes:
if not len(list(self.predecessors(n))):
yield n
def bfs_predecessors_iter(self, n):
"""Iterates breadth first over *all* predecessors of a given node.
This will go through the node's predecessors, then the predecessor
nodes' predecessors and so on until no more predecessors are found.
NOTE(harlowja): predecessor cycles (if they exist) will not be iterated
over more than once (this prevents infinite iteration).
"""
visited = set([n])
queue = collections.deque(self.predecessors(n))
while queue:
pred = queue.popleft()
if pred not in visited:
yield pred
visited.add(pred)
for pred_pred in self.predecessors(pred):
if pred_pred not in visited:
queue.append(pred_pred)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v."""
if attr_dict is not None:
return super(DiGraph, self).add_edge(u, v, **attr_dict)
return super(DiGraph, self).add_edge(u, v, **attr)
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes."""
if attr_dict is not None:
return super(DiGraph, self).add_node(n, **attr_dict)
return super(DiGraph, self).add_node(n, **attr)
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return DiGraph()
class OrderedDiGraph(DiGraph):
"""A directed graph subclass with useful utility functions.
This derivative retains node, edge, insertion and iteration
ordering (so that the iteration order matches the insertion
order).
"""
node_dict_factory = collections.OrderedDict
adjlist_outer_dict_factory = collections.OrderedDict
adjlist_inner_dict_factory = collections.OrderedDict
edge_attr_dict_factory = collections.OrderedDict
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return OrderedDiGraph()
class OrderedGraph(Graph):
"""A graph subclass with useful utility functions.
This derivative retains node, edge, insertion and iteration
ordering (so that the iteration order matches the insertion
order).
"""
node_dict_factory = collections.OrderedDict
adjlist_outer_dict_factory = collections.OrderedDict
adjlist_inner_dict_factory = collections.OrderedDict
edge_attr_dict_factory = collections.OrderedDict
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return OrderedGraph()
def merge_graphs(graph, *graphs, **kwargs):
"""Merges a bunch of graphs into a new graph.
If no additional graphs are provided the first graph is
returned unmodified otherwise the merged graph is returned.
"""
tmp_graph = graph
allow_overlaps = kwargs.get('allow_overlaps', False)
overlap_detector = kwargs.get('overlap_detector')
if overlap_detector is not None and not callable(overlap_detector):
raise ValueError("Overlap detection callback expected to be callable")
elif overlap_detector is None:
overlap_detector = (lambda to_graph, from_graph:
len(to_graph.subgraph(from_graph.nodes)))
for g in graphs:
# This should ensure that the nodes to be merged do not already exist
# in the graph that is to be merged into. This could be problematic if
# there are duplicates.
if not allow_overlaps:
# Attempt to induce a subgraph using the to-be-merged graph's nodes
# and see if any graph results.
overlaps = overlap_detector(graph, g)
if overlaps:
raise ValueError("Can not merge graph %s into %s since there "
"are %s overlapping nodes (and we do not "
"support merging nodes)" % (g, graph,
overlaps))
graph = nx.algorithms.compose(graph, g)
# Keep the first graphs name.
if graphs:
graph.name = tmp_graph.name
return graph
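if __name__ == '__main__':
    # Hedged usage sketch (added; not part of the original module).
    g = DiGraph(name='demo')
    g.add_edge('a', 'b')
    g.add_edge('b', 'c')
    print(g.is_directed_acyclic())             # True
    print(list(g.no_predecessors_iter()))      # ['a']
    print(list(g.bfs_predecessors_iter('c')))  # ['b', 'a']
    merged = merge_graphs(g, DiGraph(name='other'))
    print(merged.name)                         # 'demo': first graph's name kept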
|
079e0d8c14f1752bd8a70fc23d4426739619c61b
|
304b690ca83b82ec52dae5f53146981860f799e1
|
/BasicLevel_Python/1009 说反话 (20 分).py
|
d0f6c480fa5df341b33437bc20d7d8f339354f9f
|
[] |
no_license
|
liuchuo/PAT
|
53e6a01a5c54ebe0d8c575e10173d16b7d7cc91d
|
fe7a01b3ef233ac9f03b732b08947a85efe44a2a
|
refs/heads/master
| 2023-08-03T20:42:01.980531
| 2022-08-22T04:35:31
| 2022-08-22T04:35:31
| 49,113,721
| 3,713
| 1,168
| null | 2023-05-11T15:36:10
| 2016-01-06T05:06:34
|
C++
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
1009 说反话 (20 分).py
|
# PAT Basic 1009 "说反话" ("Say it in reverse"): print the words of the input
# line in reverse order, e.g. "Hello World Here I Come" -> "Come I Here World Hello".
words = input().split()
print(' '.join(reversed(words)))
|
aefadd6d2ce4cf56175000f83f93cb30e7288506
|
98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3
|
/bingads/v13/bulk/bulk_service_manager.py
|
1f9b33b47c09a12745caecd97fdd4b61a4a91a75
|
[
"MIT"
] |
permissive
|
BingAds/BingAds-Python-SDK
|
a2f9b0c099b574a4495d0052218f263af55cdb32
|
373a586402bf24af7137b7c49321dbc70c859fce
|
refs/heads/main
| 2023-07-27T15:31:41.354708
| 2023-07-10T03:21:03
| 2023-07-10T03:21:03
| 31,927,550
| 105
| 182
|
NOASSERTION
| 2023-09-04T06:51:20
| 2015-03-09T23:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 21,490
|
py
|
bulk_service_manager.py
|
import tempfile
import uuid
import codecs
import csv
import io
from .bulk_operation import *
from .upload_parameters import *
from .file_reader import *
from .file_writer import *
from bingads.manifest import *
from bingads.service_client import ServiceClient
from bingads.authorization import *
from bingads.util import _TimeHelper
from bingads.exceptions import TimeoutException
class BulkServiceManager:
SYNC_THRESHOLD = 1000
BOMLEN = len(codecs.BOM_UTF8)
""" Provides high level methods for uploading and downloading entities using the Bulk API functionality.
Also provides methods for submitting upload or download operations.
*Example:*
:func:`download_file` will submit the download request to the bulk service,
poll until the status is completed (or returns an error), and downloads the file locally.
If instead you want to manage the low level details you would first call :func:`submit_download`,
wait for the results file to be prepared using either :meth:`.BulkDownloadOperation.get_status`
or :meth:`.BulkDownloadOperation.track`, and then download the file with the
:meth:`.BulkOperation.download_result_file` method.
"""
def __init__(self, authorization_data, poll_interval_in_milliseconds=5000, environment='production', working_directory=None, **suds_options):
""" Initialize a new instance of this class.
:param authorization_data: Represents a user who intends to access the corresponding customer and account.
:type authorization_data: AuthorizationData
:param environment: (optional) Represents which API environment to use, default is `production`, you can also pass `sandbox` in
:type environment: str
:param poll_interval_in_milliseconds: (optional) The time interval in milliseconds between two status polling attempts.
The default value is 5000 milliseconds.
:type poll_interval_in_milliseconds: int
:param working_directory: (optional) Directory for storing temporary files needed for some operations
(for example :func:`upload_entities` creates a temporary upload file).
:param suds_options: The suds options need to pass to suds client
"""
self._environment = environment
self._service_client = ServiceClient('BulkService', 13, authorization_data, environment, **suds_options)
self._authorization_data = authorization_data
self._poll_interval_in_milliseconds = poll_interval_in_milliseconds
self._working_directory = os.path.join(tempfile.gettempdir(), WORKING_NAME)
if working_directory is not None:
self._working_directory = working_directory
# make sure the working directory exists or create it.
if not os.path.exists(self._working_directory):
os.makedirs(self._working_directory)
self._suds_options = suds_options
def download_file(self, download_parameters, progress=None):
""" Downloads the specified Bulk entities to a local file.
:param download_parameters: Determines various download parameters, for example where the file should be downloaded.
:type download_parameters: DownloadParameters
:param progress: (optional) Tracking the percent complete progress information for the bulk operation.
:type progress: BulkOperationProgressInfo -> None
:return: The downloaded local bulk file path.
:rtype: str
"""
start_timestamp = _TimeHelper.get_current_time_milliseconds()
operation = self.submit_download(download_parameters._submit_download_parameter)
try:
operation.track(progress, download_parameters.timeout_in_milliseconds)
except TimeoutException:
raise BulkDownloadException("Bulk file download tracking status timeout.")
result_file_directory = self.working_directory
if download_parameters.result_file_directory is not None:
result_file_directory = download_parameters.result_file_directory
download_result_file_timeout = _TimeHelper.get_remaining_time_milliseconds_with_min_value(start_timestamp, download_parameters.timeout_in_milliseconds)
result_file_path = operation.download_result_file(
result_file_directory=result_file_directory,
result_file_name=download_parameters.result_file_name,
decompress=download_parameters.decompress_result_file,
overwrite=download_parameters.overwrite_result_file,
timeout_in_milliseconds=download_result_file_timeout,
)
return result_file_path
def download_entities(self, download_parameters, progress=None):
""" Downloads the specified Bulk entities.
:param download_parameters: Determines various download parameters, for example where the file should be downloaded.
:type download_parameters: DownloadParameters
:param progress: (optional) Tracking the percent complete progress information for the bulk operation.
:type progress: BulkOperationProgressInfo -> None
:return: Bulk entity generator.
:rtype: generator[BulkEntity]
"""
result_file_path = self.download_file(download_parameters, progress)
result_file_type = ResultFileType.full_download \
if download_parameters.last_sync_time_in_utc is None \
else ResultFileType.partial_download
with BulkFileReader(
file_path=result_file_path,
result_file_type=result_file_type,
file_type=download_parameters.file_type,
) as reader:
for entity in reader:
yield entity
def upload_file(self, file_upload_parameters, progress=None):
""" Uploads the specified Bulk file.
:param file_upload_parameters: Determines various upload parameters.
:type file_upload_parameters: FileUploadParameters
:param progress: (optional) Tracking the percent complete progress information for the bulk operation.
:type progress: BulkOperationProgressInfo -> None
:return: The downloaded local result file path.
:rtype: str
"""
file_upload_parameters._submit_upload_parameters.timeout_in_milliseconds = file_upload_parameters.timeout_in_milliseconds
operation = self.submit_upload(file_upload_parameters._submit_upload_parameters)
return self.download_upload_result(operation, file_upload_parameters, progress)
def download_upload_result(self, operation, file_upload_parameters, progress=None):
start_timestamp = _TimeHelper.get_current_time_milliseconds()
upload_operation_timeout = _TimeHelper.get_remaining_time_milliseconds_with_min_value(start_timestamp, file_upload_parameters.timeout_in_milliseconds)
try:
operation.track(progress, upload_operation_timeout)
except TimeoutException:
raise BulkUploadException("Bulk file upload tracking status timeout.")
result_file_directory = self.working_directory
if file_upload_parameters.result_file_directory is not None:
result_file_directory = file_upload_parameters.result_file_directory
download_result_file_timeout = _TimeHelper.get_remaining_time_milliseconds_with_min_value(start_timestamp, file_upload_parameters.timeout_in_milliseconds)
result_file_path = operation.download_result_file(
result_file_directory=result_file_directory,
result_file_name=file_upload_parameters.result_file_name,
decompress=file_upload_parameters.decompress_result_file,
overwrite=file_upload_parameters.overwrite_result_file,
timeout_in_milliseconds=download_result_file_timeout,
)
return result_file_path
def need_to_try_upload_entity_records_sync_first(self, entity_upload_parameters):
return len(entity_upload_parameters.entities) <= BulkServiceManager.SYNC_THRESHOLD
def bulkupload_entities(self, entity_upload_parameters, tmp_file, progress=None):
""" Uploads the specified Bulk entities in async way.
:param entity_upload_parameters: Determines various upload parameters, for example what entities to upload.
:type entity_upload_parameters: EntityUploadParameters
:param tmp_file: The temp file path that contains the content to upload
:type tmp_file: string
:param progress: (optional) Tracking the percent complete progress information for the bulk operation.
:type progress: BulkOperationProgressInfo -> None
:return: Bulk entity generator.
:rtype: generator[BulkEntity]
"""
file_upload_parameters = FileUploadParameters(
upload_file_path=tmp_file,
result_file_directory=entity_upload_parameters.result_file_directory,
result_file_name=entity_upload_parameters.result_file_name,
overwrite_result_file=entity_upload_parameters.overwrite_result_file,
response_mode=entity_upload_parameters.response_mode,
compress_upload_file=True,
)
result_file_path = self.upload_file(
file_upload_parameters=file_upload_parameters,
progress=progress,
)
with BulkFileReader(result_file_path, result_file_type=ResultFileType.upload) as reader:
for entity in reader:
yield entity
def bulkupload_entitie_records(self, entity_upload_parameters, tmp_file, progress=None):
""" Uploads the specified Bulk entities in sync way by UploadEntityRecords.
"""
records = self.service_client.factory.create("ns2:ArrayOfstring")
tmp_csv_file = io.open(tmp_file, encoding='utf-8-sig')
records.string = [x.strip() for x in tmp_csv_file.readlines()]
try:
#print(self.service_client)
response = self.service_client.UploadEntityRecords(
AccountId=self._authorization_data.account_id,
EntityRecords=records,
ResponseMode=entity_upload_parameters.response_mode
)
if self.need_to_fall_back_to_async(response):
headers = self.service_client.get_response_header()
operation = BulkUploadOperation(
request_id=response.RequestId,
authorization_data=self._authorization_data,
poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,
environment=self._environment,
tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,
**self.suds_options
)
file_path = self.download_upload_result(operation, entity_upload_parameters, progress)
return self.read_result_from_bulk_file(file_path)
else:
return self.read_bulkupsert_response(response)
except Exception as ex:
if 'OperationNotSupported' == operation_errorcode_of_exception(ex):
return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)
else:
raise ex
def need_to_fall_back_to_async(self, response):
return response.RequestId is not None and \
len(response.RequestId) > 0 and \
response.RequestStatus == 'InProgress'
def read_result_from_bulk_file(self, result_bulk_file):
with BulkFileReader(result_bulk_file, result_file_type=ResultFileType.upload) as reader:
for entity in reader:
yield entity
def read_bulkupsert_response(self, response):
with BulkRowsReader(response.EntityRecords.string) as reader:
for entity in reader:
yield entity
def retry_with_BulkUpload(self, bulkupsert_response):
if bulkupsert_response.Errors is not None:
error_codes = [e.ErrorCode for e in bulkupsert_response.Errors]
return FailedBulkUpsertRetryBulkUploadInstead in error_codes
return False
def upload_entities(self, entity_upload_parameters, progress=None):
""" Uploads the specified Bulk entities.
:param entity_upload_parameters: Determines various upload parameters, for example what entities to upload.
:type entity_upload_parameters: EntityUploadParameters
:param progress: (optional) Tracking the percent complete progress information for the bulk operation.
:type progress: BulkOperationProgressInfo -> None
:return: Bulk entity generator.
:rtype: generator[BulkEntity]
"""
tmp_file = path.join(self.working_directory, '{0}.csv'.format(uuid.uuid1()))
with BulkFileWriter(tmp_file) as writer:
for entity in entity_upload_parameters.entities:
writer.write_entity(entity)
if (self.need_to_try_upload_entity_records_sync_first(entity_upload_parameters)):
return self.bulkupload_entitie_records(entity_upload_parameters, tmp_file, progress)
else:
return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)
def submit_download(self, submit_download_parameters):
""" Submits a download request to the Bing Ads bulk service with the specified parameters.
:param submit_download_parameters: Determines various download parameters, for example what entities to download.
:type submit_download_parameters: SubmitDownloadParameters
:return: The submitted download operation
:rtype: BulkDownloadOperation
"""
data_scope = None if submit_download_parameters.data_scope is None else ' '.join(
submit_download_parameters.data_scope)
download_file_type = submit_download_parameters.file_type
download_entities = self.service_client.factory.create('ArrayOfDownloadEntity')
download_entities.DownloadEntity = submit_download_parameters.download_entities
# entities = None if submit_download_parameters.entities is None else ' '.join(
# submit_download_parameters.entities)
format_version = BULK_FORMAT_VERSION_6
last_sync_time_in_utc = submit_download_parameters.last_sync_time_in_utc
if submit_download_parameters.campaign_ids is None:
response = self.service_client.DownloadCampaignsByAccountIds(
AccountIds={'long': [self._authorization_data.account_id]},
DataScope=data_scope,
DownloadFileType=download_file_type,
DownloadEntities=download_entities,
FormatVersion=format_version,
LastSyncTimeInUTC=last_sync_time_in_utc,
)
headers = self.service_client.get_response_header()
else:
response = self.service_client.DownloadCampaignsByCampaignIds(
Campaigns={
'CampaignScope': [
{'CampaignId': campaign_id, 'ParentAccountId': self._authorization_data.account_id}
for campaign_id in submit_download_parameters.campaign_ids
]
},
DataScope=data_scope,
DownloadFileType=download_file_type,
DownloadEntities=download_entities,
FormatVersion=format_version,
LastSyncTimeInUTC=last_sync_time_in_utc,
)
headers = self.service_client.get_response_header()
operation = BulkDownloadOperation(
request_id=response,
authorization_data=self._authorization_data,
poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,
environment=self._environment,
tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,
**self.suds_options
)
return operation
def submit_upload(self, submit_upload_parameters):
""" Submits a request for a URL where a bulk upload file may be posted.
:param submit_upload_parameters: Determines various upload parameters, for example the path of the upload file.
:type submit_upload_parameters: SubmitUploadParameters
:return: The submitted upload operation.
:rtype: BulkUploadOperation
"""
response = self.service_client.GetBulkUploadUrl(
AccountId=self._authorization_data.account_id,
ResponseMode=submit_upload_parameters.response_mode,
)
headers = self.service_client.get_response_header()
request_id = response.RequestId
upload_url = response.UploadUrl
if submit_upload_parameters.rename_upload_file_to_match_request_id:
import os
dir = os.path.dirname(submit_upload_parameters.upload_file_path)
new_file_to_upload = os.path.join(dir, 'upload_' + request_id + '.csv')
os.rename(submit_upload_parameters.upload_file_path, new_file_to_upload)
submit_upload_parameters.upload_file_path = new_file_to_upload
self._upload_file_by_url(
url=upload_url,
upload_file_path=submit_upload_parameters.upload_file_path,
compress_upload_file=submit_upload_parameters.compress_upload_file,
)
operation = BulkUploadOperation(
request_id=request_id,
authorization_data=self._authorization_data,
poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,
environment=self._environment,
tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,
**self.suds_options
)
return operation
def _upload_file_by_url(self, url, upload_file_path, compress_upload_file, timeout_in_milliseconds=None):
""" Upload bulk file specified in upload parameters to specified URL
:param url: The upload target URL.
:type url: str
:param upload_file_path: The fully qualified local path of the upload file.
:type upload_file_path: str
:param compress_upload_file: whether the upload file should be compressed before uploading.
:type compress_upload_file: bool
"""
_, ext = path.splitext(upload_file_path)
if compress_upload_file and ext != '.zip':
should_compress = True
else:
should_compress = False
try:
if should_compress:
name, ext = path.splitext(upload_file_path)
zip_file_path = os.path.join(self.working_directory, '{0}_{1}.zip'.format(name, uuid.uuid1()))
with contextlib.closing(zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)) as f:
f.write(upload_file_path)
upload_file_path = zip_file_path
headers = {
'DeveloperToken': self._authorization_data.developer_token,
'CustomerId': str(self._authorization_data.customer_id),
'AccountId': str(self._authorization_data.account_id),
'User-Agent': USER_AGENT,
}
self._authorization_data.authentication.enrich_headers(headers)
with open(upload_file_path, 'rb') as f:
name, ext = path.splitext(upload_file_path)
filename = '{0}{1}'.format(uuid.uuid1(), ext)
s = requests.Session()
s.mount('https://', TlsHttpAdapter())
timeout_seconds = None if timeout_in_milliseconds is None else timeout_in_milliseconds / 1000.0
try:
r = s.post(url, files={'file': (filename, f)}, verify=True, headers=headers, timeout=timeout_seconds)
except requests.Timeout as ex:
raise FileUploadException(ex)
r.raise_for_status()
except Exception as ex:
raise ex
finally:
if should_compress:
name, ext = path.splitext(upload_file_path)
zip_file_path = name + '.zip'
if path.exists(zip_file_path):
os.remove(zip_file_path)
@property
def service_client(self):
""" The internal bulk service client.
:rtype: ServiceClient
"""
return self._service_client
@property
def poll_interval_in_milliseconds(self):
""" The time interval in milliseconds between two status polling attempts.
:rtype: int
"""
return self._poll_interval_in_milliseconds
@poll_interval_in_milliseconds.setter
def poll_interval_in_milliseconds(self, poll_interval):
self._poll_interval_in_milliseconds = poll_interval
@property
def working_directory(self):
""" Directory for storing temporary files needed for some operations (for example :func:`upload_entities` creates a temporary upload file).
:rtype: str
"""
return self._working_directory
@working_directory.setter
def working_directory(self, value):
self._working_directory = value
@property
def suds_options(self):
""" suds option parameters
:return: dict
"""
return self._suds_options
@suds_options.setter
def suds_options(self, value):
self._suds_options = value
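# Illustrative usage sketch (not part of the original file): a minimal, hedged
# example of driving submit_upload() above. The credential values and file
# path are placeholders, and the exact import paths and constructor arguments
# are assumptions based on the Bing Ads Python SDK layout; real use needs
# valid credentials and an existing bulk file.
def _example_submit_upload(): # pragma: no cover - sketch only, never called
    from bingads.authorization import AuthorizationData
    from bingads.v13.bulk import BulkServiceManager, SubmitUploadParameters

    authorization_data = AuthorizationData(
        account_id=123456, # placeholder account id
        customer_id=654321, # placeholder customer id
        developer_token='<developer token>',
        authentication=None, # e.g. an OAuth grant instance
    )
    bulk_service = BulkServiceManager(authorization_data=authorization_data)
    parameters = SubmitUploadParameters(
        upload_file_path='/tmp/bulk_upload.csv',
        response_mode='ErrorsAndResults',
        compress_upload_file=True,
    )
    # submit_upload() posts the file and returns a BulkUploadOperation whose
    # completion status can then be polled (e.g. via track()).
    operation = bulk_service.submit_upload(parameters)
    return operation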
[file: /pcapkit/const/hip/esp_transform_suite.py | repo: JarryShaw/PyPCAPKit | license: BSD-3-Clause]
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long,consider-using-f-string
"""ESP Transform Suite IDs
=============================
.. module:: pcapkit.const.hip.esp_transform_suite
This module contains the constant enumeration for **ESP Transform Suite IDs**,
which is automatically generated from :class:`pcapkit.vendor.hip.esp_transform_suite.ESPTransformSuite`.
"""
from aenum import IntEnum, extend_enum
__all__ = ['ESPTransformSuite']
class ESPTransformSuite(IntEnum):
"""[ESPTransformSuite] ESP Transform Suite IDs"""
#: RESERVED [:rfc:`7402`]
RESERVED_0 = 0
#: AES-128-CBC with HMAC-SHA1 [:rfc:`3602`][:rfc:`2404`]
AES_128_CBC_with_HMAC_SHA1 = 1
#: DEPRECATED [:rfc:`7402`]
DEPRECATED_2 = 2
#: DEPRECATED [:rfc:`7402`]
DEPRECATED_3 = 3
#: DEPRECATED [:rfc:`7402`]
DEPRECATED_4 = 4
#: DEPRECATED [:rfc:`7402`]
DEPRECATED_5 = 5
#: DEPRECATED [:rfc:`7402`]
DEPRECATED_6 = 6
#: NULL with HMAC-SHA-256 [:rfc:`2410`][:rfc:`4868`]
NULL_with_HMAC_SHA_256 = 7
#: AES-128-CBC with HMAC-SHA-256 [:rfc:`3602`][:rfc:`4868`]
AES_128_CBC_with_HMAC_SHA_256 = 8
#: AES-256-CBC with HMAC-SHA-256 [:rfc:`3602`][:rfc:`4868`]
AES_256_CBC_with_HMAC_SHA_256 = 9
#: AES-CCM-8 [:rfc:`4309`]
AES_CCM_8 = 10
#: AES-CCM-16 [:rfc:`4309`]
AES_CCM_16 = 11
#: AES-GCM with an 8 octet ICV [:rfc:`4106`]
AES_GCM_with_an_8_octet_ICV = 12
#: AES-GCM with a 16 octet ICV [:rfc:`4106`]
AES_GCM_with_a_16_octet_ICV = 13
#: AES-CMAC-96 [:rfc:`4493`][:rfc:`4494`]
AES_CMAC_96 = 14
#: AES-GMAC [:rfc:`4543`]
AES_GMAC = 15
@staticmethod
def get(key: 'int | str', default: 'int' = -1) -> 'ESPTransformSuite':
"""Backport support for original codes.
Args:
key: Key to get enum item.
default: Default value if not found.
:meta private:
"""
if isinstance(key, int):
return ESPTransformSuite(key)
if key not in ESPTransformSuite._member_map_: # pylint: disable=no-member
return extend_enum(ESPTransformSuite, key, default)
return ESPTransformSuite[key] # type: ignore[misc]
@classmethod
def _missing_(cls, value: 'int') -> 'ESPTransformSuite':
"""Lookup function used when value is not found.
Args:
value: Value to get enum item.
"""
if not (isinstance(value, int) and 0 <= value <= 65535):
raise ValueError('%r is not a valid %s' % (value, cls.__name__))
if 16 <= value <= 65535:
#: Unassigned
return extend_enum(cls, 'Unassigned_%d' % value, value)
return super()._missing_(value)
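# A short, hedged demonstration (not in the original file) of the lookup
# helpers above; everything referenced here is defined in this module.
if __name__ == '__main__':
    # Plain value lookup resolves to the registered member.
    assert ESPTransformSuite(7) is ESPTransformSuite.NULL_with_HMAC_SHA_256
    # get() accepts either an int or a member name.
    assert ESPTransformSuite.get(1) is ESPTransformSuite.AES_128_CBC_with_HMAC_SHA1
    assert ESPTransformSuite.get('AES_GMAC') is ESPTransformSuite.AES_GMAC
    # Unassigned values in 16-65535 are materialised on the fly by _missing_().
    assert ESPTransformSuite(100).name == 'Unassigned_100'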
[file: /alipay/aop/api/domain/TbapiQueryAmountResponse.py | repo: alipay/alipay-sdk-python-all | license: Apache-2.0]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TbapiQueryAmountResponse(object):
def __init__(self):
self._amt_map = None
self._available_group_amt = None
@property
def amt_map(self):
return self._amt_map
@amt_map.setter
def amt_map(self, value):
self._amt_map = value
@property
def available_group_amt(self):
return self._available_group_amt
@available_group_amt.setter
def available_group_amt(self, value):
self._available_group_amt = value
def to_alipay_dict(self):
params = dict()
if self.amt_map:
if hasattr(self.amt_map, 'to_alipay_dict'):
params['amt_map'] = self.amt_map.to_alipay_dict()
else:
params['amt_map'] = self.amt_map
if self.available_group_amt:
if hasattr(self.available_group_amt, 'to_alipay_dict'):
params['available_group_amt'] = self.available_group_amt.to_alipay_dict()
else:
params['available_group_amt'] = self.available_group_amt
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TbapiQueryAmountResponse()
if 'amt_map' in d:
o.amt_map = d['amt_map']
if 'available_group_amt' in d:
o.available_group_amt = d['available_group_amt']
return o
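# Illustrative round-trip sketch (not part of the SDK source); the field
# values are made-up examples, not real Alipay data.
if __name__ == '__main__':
    response = TbapiQueryAmountResponse()
    response.amt_map = {'CNY': '100.00'}
    response.available_group_amt = '80.00'
    # Plain values pass through to_alipay_dict() unchanged, and
    # from_alipay_dict() restores them onto a fresh instance.
    payload = response.to_alipay_dict()
    restored = TbapiQueryAmountResponse.from_alipay_dict(payload)
    assert restored.amt_map == {'CNY': '100.00'}
    assert restored.available_group_amt == '80.00'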
[file: /tests/test_model.py | repo: rgc99/irrigation_unlimited | license: MIT]
"""irrigation_unlimited model test template"""
# pylint: disable=unused-import
from datetime import timedelta
import json
import pytest
import homeassistant.core as ha
from custom_components.irrigation_unlimited.const import (
SERVICE_TIME_ADJUST,
)
from tests.iu_test_support import IUExam, mk_utc, mk_local
IUExam.quiet_mode()
# Uncomment the following decorator to skip this test
# @pytest.mark.skip
async def test_model(hass: ha.HomeAssistant, skip_dependencies, skip_history):
"""Model IUExam class."""
# pylint: disable=unused-argument
# Start an examination
async with IUExam(hass, "test_model.yaml") as exam:
# Prevent checking results. Helpful for just outputting
# results when it is known the test will fail.
# (set `output_events: true` in yaml)
# exam.no_check()
# Load any components required for the test
# await exam.async_load_component("homeassistant")
# await exam.async_load_component("input_boolean")
# Load dependencies if required
# await exam.load_dependencies()
# Run all tests
# await exam.run_all()
# Run a single test
# await exam.run_test(1)
# Start a test and do something...
await exam.begin_test(1)
# Make a service call
# await exam.call(
# SERVICE_TIME_ADJUST,
# {
# "entity_id": "binary_sensor.irrigation_unlimited_c1_m",
# "sequence_id": 1,
# "zones": 0,
# "actual": "00:10",
# },
# )
# Run to a point in time
# await exam.run_until(mk_utc("2021-01-04 06:02:00"))
# Run for a period of time
# await exam.run_for(timedelta(minutes=15))
# Check some things
#
# Inspect a zone entity
# sta = hass.states.get("binary_sensor.irrigation_unlimited_c1_z1")
# print(sta.state)
# print(sta.attributes)
#
# Raise an error if the "next_start" attribute is not a certain time
# assert sta.attributes["next_start"] == mk_local("2021-01-04 06:03")
#
# Print out the configuration
# print(json.dumps(exam.coordinator.as_dict(), default=str))
#
# Print out the virtual date and time
# print(f"The date and time is {exam.virtual_time}")
# Finish up the current test
await exam.finish_test()
# Check the exam results
exam.check_summary()
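# Illustrative sketch: the template above with the placeholders filled in,
# reusing only calls and values already shown in the commented examples. The
# YAML name, entity id, and timestamps are assumptions for illustration and
# must match the test's actual configuration, so the test is marked skipped.
@pytest.mark.skip
async def test_model_filled_in(hass: ha.HomeAssistant, skip_dependencies, skip_history):
    """Example of the template above with concrete calls."""
    # pylint: disable=unused-argument
    async with IUExam(hass, "test_model.yaml") as exam:
        await exam.begin_test(1)
        await exam.call(
            SERVICE_TIME_ADJUST,
            {
                "entity_id": "binary_sensor.irrigation_unlimited_c1_m",
                "sequence_id": 1,
                "zones": 0,
                "actual": "00:10",
            },
        )
        await exam.run_until(mk_utc("2021-01-04 06:02:00"))
        await exam.finish_test()
        exam.check_summary()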
[file: /crossview_dataset/visualization/visualizer_3d.py | repo: longcw/crossview_3d_pose_tracking | license: none stated]
from vispy import scene
from vispy.scene import cameras, visuals
from vispy.visuals.line import LineVisual
from threading import Thread
from multiprocessing import Process, Queue
from queue import Full
import time
import numpy as np
import copy
class XYZAxisVisual(LineVisual):
"""
Simple 3D axis for indicating coordinate system orientation. Axes are
x=red, y=green, z=blue.
"""
def __init__(self, world_ltrb, max_z, **kwargs):
x1, y1, x2, y2 = world_ltrb
verts = np.array(
[
[x1, y1, 0],
[x2 + 10, y1, 0],
[x1, y1, 0],
[x1, y2 + 10, 0],
[x1, y1, 0],
[x1, y1, max_z + 10],
]
)
color = np.array(
[
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
]
)
LineVisual.__init__(
self, pos=verts, color=color, connect="segments", method="gl", **kwargs
)
def vispy_process(queue, world_ltrb):
def data_thread(queue, markers):
while True:
data = queue.get()
if data is None:
time.sleep(0.1)
canvas.close()
break
points, face_colors, edge_colors = data
if face_colors is not None and len(points) > 0:
edge_width = 1 if isinstance(edge_colors, str) else 3
markers.set_data(
points,
edge_width=edge_width,
edge_color=edge_colors,
face_color=face_colors,
)
else:
markers.set_data(points)
canvas = scene.SceneCanvas(
title="3D Poses", bgcolor="w", size=(400, 400), show=True
)
view = canvas.central_widget.add_view()
view.camera = cameras.TurntableCamera()
view.camera.fov = 80
view.camera.distance = (
max(world_ltrb[2] - world_ltrb[0], world_ltrb[3] - world_ltrb[1]) * 0.8
) # 200 or 800
visuals.create_visual_node(XYZAxisVisual)(world_ltrb, 100, parent=view.scene)
x1, y1, x2, y2 = world_ltrb
visuals.SurfacePlot(
x=np.array([x1, x2]),
y=np.array([y1, y2]),
z=np.zeros((2, 2)),
color=(0.5, 0.5, 0.5, 1),
parent=view.scene,
)
markers = scene.visuals.Markers()
markers.parent = view.scene
# events
def on_close(event):
if event.text.lower() == "q":
queue.put(None)
canvas.events.key_press.connect(on_close)
thread = Thread(target=data_thread, args=[queue, markers])
thread.start()
canvas.app.run()
thread.join()
class Visualizer3D(object):
def __init__(self, world_ltrb, color_generator):
self.color_generator = color_generator
self.world_ltrb = copy.deepcopy(world_ltrb)
self.ori_wcx = np.mean(self.world_ltrb[0::2])
self.ori_wcy = np.mean(self.world_ltrb[1::2])
self.world_ltrb[0::2] -= self.ori_wcx
self.world_ltrb[1::2] -= self.ori_wcy
self.queue = Queue(maxsize=1)
self.plot_process = Process(
target=vispy_process, args=[self.queue, self.world_ltrb]
)
self.plot_process.start()
    def start(self):
        # The render process is already started in __init__; guard against a
        # second Process.start(), which multiprocessing rejects.
        if not self.plot_process.is_alive() and self.plot_process.exitcode is None:
            self.plot_process.start()
def stop(self):
try:
self.queue.put(None, timeout=0.1)
except Full:
pass
def update(self, data_3d):
if data_3d is None:
poses = []
points = np.empty([0, 3], dtype=float)
else:
poses = data_3d["poses"]
points = np.asarray([pose["points_3d"] for pose in poses]).reshape(-1, 3)
face_colors = None
edge_colors = "black"
if len(poses) > 0:
points[:, 0] -= self.ori_wcx
points[:, 1] -= self.ori_wcy
xmin, ymin, xmax, ymax = self.world_ltrb
scores = np.asarray([pose["scores"] for pose in poses]).reshape(-1)
keep = (
(scores > 0.0)
& (points[:, 0] > xmin)
& (points[:, 0] < xmax)
& (points[:, 1] > ymin)
& (points[:, 1] < ymax)
)
points = points[keep]
repeats = len(poses[0]["points_3d"])
face_colors = self.get_colors([pose["id"] for pose in poses], repeats)
face_colors = face_colors[keep]
try:
self.queue.put((points, face_colors, edge_colors), timeout=1)
return True
except Full:
return False
def get_colors(self, target_ids, repeats):
colors = np.asarray(
[self.color_generator.get_color(target_id) for target_id in target_ids],
dtype=float,
)
colors /= 255.0
colors = colors[:, ::-1]
colors = np.repeat(colors, repeats, axis=0)
return colors
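# Hedged usage sketch (not part of the original file). The world bounds, the
# pose payload, and the color generator stand-in below are assumptions for
# illustration; a real run also needs vispy with a working OpenGL backend.
if __name__ == "__main__":
    class _FixedColorGenerator:
        """Stand-in color source returning one 0-255 BGR tuple per target id."""
        def get_color(self, target_id):
            return (0, 165, 255)

    world_ltrb = np.array([0.0, 0.0, 1000.0, 1000.0])
    viz = Visualizer3D(world_ltrb, _FixedColorGenerator())
    data_3d = {
        "poses": [
            {
                "id": 1,
                "points_3d": np.array([[500.0, 500.0, 170.0]]),
                "scores": np.array([0.9]),
            }
        ]
    }
    viz.update(data_3d)  # push one frame of points to the render process
    time.sleep(5.0)      # keep the window alive briefly
    viz.stop()
    viz.plot_process.join()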
[file: /instrumentation/opentelemetry-instrumentation-pymongo/tests/test_pymongo.py | repo: open-telemetry/opentelemetry-python-contrib | license: Apache-2.0]
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from opentelemetry import context
from opentelemetry import trace as trace_api
from opentelemetry.instrumentation.pymongo import (
CommandTracer,
PymongoInstrumentor,
)
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.test_base import TestBase
class TestPymongo(TestBase):
def setUp(self):
super().setUp()
self.tracer = self.tracer_provider.get_tracer(__name__)
self.start_callback = mock.MagicMock()
self.success_callback = mock.MagicMock()
self.failed_callback = mock.MagicMock()
def test_pymongo_instrumentor(self):
mock_register = mock.Mock()
patch = mock.patch(
"pymongo.monitoring.register", side_effect=mock_register
)
with patch:
PymongoInstrumentor().instrument()
self.assertTrue(mock_register.called)
def test_started(self):
command_attrs = {
"command_name": "find",
}
command_tracer = CommandTracer(
self.tracer, request_hook=self.start_callback
)
mock_event = MockEvent(
command_attrs, ("test.com", "1234"), "test_request_id"
)
command_tracer.started(event=mock_event)
# the memory exporter can't be used here because the span isn't ended
# yet
# pylint: disable=protected-access
span = command_tracer._pop_span(mock_event)
self.assertIs(span.kind, trace_api.SpanKind.CLIENT)
self.assertEqual(span.name, "database_name.command_name")
self.assertEqual(span.attributes[SpanAttributes.DB_SYSTEM], "mongodb")
self.assertEqual(
span.attributes[SpanAttributes.DB_NAME], "database_name"
)
self.assertEqual(
span.attributes[SpanAttributes.DB_STATEMENT], "command_name"
)
self.assertEqual(
span.attributes[SpanAttributes.NET_PEER_NAME], "test.com"
)
self.assertEqual(span.attributes[SpanAttributes.NET_PEER_PORT], "1234")
self.start_callback.assert_called_once_with(span, mock_event)
def test_succeeded(self):
mock_event = MockEvent({})
command_tracer = CommandTracer(
self.tracer,
request_hook=self.start_callback,
response_hook=self.success_callback,
)
command_tracer.started(event=mock_event)
command_tracer.succeeded(event=mock_event)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertIs(span.status.status_code, trace_api.StatusCode.UNSET)
self.assertIsNotNone(span.end_time)
self.start_callback.assert_called_once()
self.success_callback.assert_called_once()
def test_not_recording(self):
mock_tracer = mock.Mock()
mock_span = mock.Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
mock_event = MockEvent({})
command_tracer = CommandTracer(mock_tracer)
command_tracer.started(event=mock_event)
command_tracer.succeeded(event=mock_event)
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
def test_suppression_key(self):
mock_tracer = mock.Mock()
mock_span = mock.Mock()
mock_span.is_recording.return_value = True
mock_tracer.start_span.return_value = mock_span
mock_event = MockEvent({})
mock_event.command.get = mock.Mock()
mock_event.command.get.return_value = "dummy"
token = context.attach(
context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
)
try:
command_tracer = CommandTracer(mock_tracer)
command_tracer.started(event=mock_event)
command_tracer.succeeded(event=mock_event)
finally:
context.detach(token)
# if suppression key is set, CommandTracer methods return immediately, so command.get is not invoked.
self.assertFalse(
mock_event.command.get.called # pylint: disable=no-member
)
def test_failed(self):
mock_event = MockEvent({})
command_tracer = CommandTracer(
self.tracer,
request_hook=self.start_callback,
failed_hook=self.failed_callback,
)
command_tracer.started(event=mock_event)
command_tracer.failed(event=mock_event)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertIs(
span.status.status_code,
trace_api.StatusCode.ERROR,
)
self.assertEqual(span.status.description, "failure")
self.assertIsNotNone(span.end_time)
self.start_callback.assert_called_once()
self.failed_callback.assert_called_once()
def test_multiple_commands(self):
first_mock_event = MockEvent({}, ("firstUrl", "123"), "first")
second_mock_event = MockEvent({}, ("secondUrl", "456"), "second")
command_tracer = CommandTracer(self.tracer)
command_tracer.started(event=first_mock_event)
command_tracer.started(event=second_mock_event)
command_tracer.succeeded(event=first_mock_event)
command_tracer.failed(event=second_mock_event)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 2)
first_span = spans_list[0]
second_span = spans_list[1]
self.assertIs(
first_span.status.status_code,
trace_api.StatusCode.UNSET,
)
self.assertIs(
second_span.status.status_code,
trace_api.StatusCode.ERROR,
)
def test_int_command(self):
command_attrs = {
"command_name": 123,
}
mock_event = MockEvent(command_attrs)
command_tracer = CommandTracer(self.tracer)
command_tracer.started(event=mock_event)
command_tracer.succeeded(event=mock_event)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertEqual(span.name, "database_name.command_name")
def test_no_op_tracer(self):
mock_event = MockEvent({})
tracer = trace_api.NoOpTracer()
command_tracer = CommandTracer(tracer)
command_tracer.started(event=mock_event)
command_tracer.succeeded(event=mock_event)
spans_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans_list), 0)
class MockCommand:
def __init__(self, command_attrs):
self.command_attrs = command_attrs
def get(self, key, default=""):
return self.command_attrs.get(key, default)
class MockEvent:
def __init__(self, command_attrs, connection_id=None, request_id=""):
self.command = MockCommand(command_attrs)
self.connection_id = connection_id
self.request_id = request_id
def __getattr__(self, item):
return item
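# Hedged sketch (not part of the test file): outside of tests, CommandTracer
# is wired into pymongo through the instrumentor roughly as below. The hook
# signature (span, event) mirrors the request_hook used in the tests above;
# the Mongo URI is a placeholder, and instrument() accepting request_hook is
# an assumption inferred from the hooks exercised in this file.
def _example_instrument(): # pragma: no cover - sketch only, never called
    from pymongo import MongoClient
    from opentelemetry.instrumentation.pymongo import PymongoInstrumentor

    def request_hook(span, event):
        # Called when a command starts; `event` is pymongo's CommandStartedEvent.
        if span is not None and span.is_recording():
            span.set_attribute("custom.request_id", event.request_id)

    PymongoInstrumentor().instrument(request_hook=request_hook)
    client = MongoClient("mongodb://localhost:27017")  # placeholder URI
    client.test_db.test_collection.find_one()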
[file: /tests/test_lazyjson.py | repo: xonsh/xonsh | license: BSD-2-Clause]
"""Tests lazy json functionality."""
from io import StringIO
from xonsh.lazyjson import LazyJSON, LJNode, index, ljdump
def test_index_int():
exp = {"offsets": 0, "sizes": 2}
s, obs = index(42)
assert exp == obs
def test_index_str():
exp = {"offsets": 0, "sizes": 7}
s, obs = index("wakka")
assert exp == obs
def test_index_list_ints():
exp = {"offsets": [1, 4, 0], "sizes": [1, 2, 8]}
s, obs = index([1, 42])
assert exp == obs
def test_index_list_str():
exp = {"offsets": [1, 10, 0], "sizes": [7, 8, 20]}
s, obs = index(["wakka", "jawaka"])
assert exp == obs
def test_index_list_str_int():
exp = {"offsets": [1, 10, 0], "sizes": [7, 2, 14]}
s, obs = index(["wakka", 42])
assert exp == obs
def test_index_list_int_str():
exp = {"offsets": [1, 5, 14, 0], "sizes": [2, 7, 8, 24]}
s, obs = index([42, "wakka", "jawaka"])
assert exp == obs
def test_index_dict_int():
exp = {
"offsets": {"wakka": 10, "__total__": 0},
"sizes": {"wakka": 2, "__total__": 14},
}
s, obs = index({"wakka": 42})
assert exp == obs
def test_index_dict_str():
exp = {
"offsets": {"wakka": 10, "__total__": 0},
"sizes": {"wakka": 8, "__total__": 20},
}
s, obs = index({"wakka": "jawaka"})
assert exp == obs
def test_index_dict_dict_int():
exp = {
"offsets": {"wakka": {"jawaka": 21, "__total__": 10}, "__total__": 0},
"sizes": {"wakka": {"jawaka": 2, "__total__": 15}, "__total__": 27},
}
s, obs = index({"wakka": {"jawaka": 42}})
assert exp == obs
def test_lazy_load_index():
f = StringIO()
ljdump({"wakka": 42}, f)
f.seek(0)
lj = LazyJSON(f)
assert {"wakka": 10, "__total__": 0} == lj.offsets
assert {"wakka": 2, "__total__": 14} == lj.sizes
def test_lazy_int():
f = StringIO()
ljdump(42, f)
f.seek(0)
lj = LazyJSON(f)
assert 42 == lj.load()
def test_lazy_str():
f = StringIO()
ljdump("wakka", f)
f.seek(0)
lj = LazyJSON(f)
assert "wakka" == lj.load()
def test_lazy_list_empty():
x = []
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert 0 == len(lj)
assert x == lj.load()
def test_lazy_list_ints():
x = [0, 1, 6, 28, 496, 8128]
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert 28 == lj[3]
assert x[:2:-2] == lj[:2:-2]
assert x == [_ for _ in lj]
assert x == lj.load()
def test_lazy_list_str():
x = ["I", "have", "seen", "the", "wind", "blow"]
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert "the" == lj[3]
assert x[:2:-2] == lj[:2:-2]
assert x == [_ for _ in lj]
assert x == lj.load()
def test_lazy_list_list_ints():
x = [[0, 1], [6, 28], [496, 8128]]
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert isinstance(lj[1], LJNode)
assert 28 == lj[1][1]
    assert [6, 28] == lj[1].load()
assert x == lj.load()
def test_lazy_dict_empty():
x = {}
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert 0 == len(lj)
assert x == lj.load()
def test_lazy_dict():
f = StringIO()
ljdump({"wakka": 42}, f)
f.seek(0)
lj = LazyJSON(f)
assert ["wakka"] == list(lj.keys())
assert 42 == lj["wakka"]
assert 1 == len(lj)
assert {"wakka": 42} == lj.load()
def test_lazy_dict_dict_int():
x = {"wakka": {"jawaka": 42}}
f = StringIO()
ljdump(x, f)
f.seek(0)
lj = LazyJSON(f)
assert ["wakka"] == list(lj.keys())
assert isinstance(lj["wakka"], LJNode)
assert 42 == lj["wakka"]["jawaka"]
assert 1 == len(lj)
assert x == lj.load()
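# Small, hedged illustration (not in the original file) of the lazy access
# pattern the tests above verify: only the index is read up front, and each
# value is pulled from its byte span on demand.
if __name__ == "__main__":
    f = StringIO()
    ljdump({"wakka": {"jawaka": 42}, "botsuri": "tsunami"}, f)
    f.seek(0)
    lj = LazyJSON(f)
    node = lj["wakka"]           # an LJNode view, not yet a plain dict
    assert isinstance(node, LJNode)
    assert node["jawaka"] == 42  # reads just that value's slice of the file
    assert lj["botsuri"] == "tsunami"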