Dataset schema (column types and observed ranges in the preview):

| column | type |
|---|---|
| blob_id | stringlengths 40-40 |
| directory_id | stringlengths 40-40 |
| path | stringlengths 4-721 |
| content_id | stringlengths 40-40 |
| detected_licenses | listlengths 0-57 |
| license_type | stringclasses (2 values) |
| repo_name | stringlengths 5-91 |
| snapshot_id | stringlengths 40-40 |
| revision_id | stringlengths 40-40 |
| branch_name | stringclasses (321 values) |
| visit_date | timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64, 426 to 681M |
| star_events_count | int64, 101 to 243k |
| fork_events_count | int64, 0 to 110k |
| gha_license_id | stringclasses (23 values) |
| gha_event_created_at | timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable |
| gha_language | stringclasses (147 values) |
| src_encoding | stringclasses (26 values) |
| language | stringclasses (2 values) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64, 6 to 10.2M |
| extension | stringclasses (115 values) |
| filename | stringlengths 3-113 |
| content | stringlengths 6 to 10.2M |
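
A minimal sketch of how rows with this schema can be consumed, assuming the preview comes from a Hugging Face dataset served through the `datasets` library; the path `org/dataset` is a placeholder, not the real dataset name.

```python
# Minimal sketch, assuming the rows below come from a Hugging Face dataset;
# "org/dataset" is a placeholder path, not the real dataset name.
from datasets import load_dataset

rows = load_dataset("org/dataset", split="train", streaming=True)
for row in rows.take(2):  # stream a couple of records instead of downloading all
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```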

| column | value |
|---|---|
| blob_id | 3d4fe06378ebe096502af1bb338d0b8aa6f903c1 |
| directory_id | 07b158ab4d91f779eefa007d3c6dc71ea9508a2b |
| path | /demo/tessagon_common_demo.py |
| content_id | 590d79e9e312b64e58ae662b23ded3e11951eeb8 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | cwant/tessagon |
| snapshot_id | dac1f2dce4f876b823a80c9def856817a543d720 |
| revision_id | e5b9e87aead33e2377240d5925bf328a99031229 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-31T17:37:58.906444 |
| revision_date | 2023-07-03T16:42:02 |
| committer_date | 2023-07-03T16:42:02 |
| github_id | 104,539,159 |
| star_events_count | 214 |
| fork_events_count | 21 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2022-11-18T03:06:24 |
| gha_created_at | 2017-09-23T03:40:37 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 17,151 |
| extension | py |
| filename | tessagon_common_demo.py |
| content | (file contents below) |

import inspect
import re
from tessagon import TessagonDiscovery
from tessagon.misc.shapes import cylinder, torus, one_sheet_hyperboloid, \
klein, mobius, sphere, paraboloid, general_torus, warp_var
class TessagonCommonDemo:
# This is an abstract class that handles common code for the
# demos. Each subclass needs to implement the 'tessellate' method
# which instantiates each tessagon class, creates a mesh, and puts it
# in the scene for the particular software package.
def class_to_method(self, cls):
        # We have rendering methods like self.hex_tessagon,
        # self.square_tessagon, etc., and we would like to look up the
        # matching method when passed the tessagon class cls, e.g.,
        # HexTessagon, SquareTessagon, etc.
# Class to snake case, e.g. HexTessagon -> hex_tessagon
method_name = re.sub(r'(?<!^)(?=[A-Z])', '_', cls.__name__).lower()
# Return alias to method, e.g. self.hex_tessagon
return getattr(self, method_name)
def method_to_class(self):
# When called from a method name like 'square_tessagon' we want to return
# the Tessagon class SquareTessagon
method_name = inspect.stack()[1][3]
# Convert from snake_case to CamelCase
class_name = ''.join(word.title() for word in method_name.split('_'))
return TessagonDiscovery.get_class(class_name)
def create_objects(self):
find_all = TessagonDiscovery()
classes = find_all.with_classification('regular').to_list() \
+ find_all.with_classification('archimedean').to_list() \
+ find_all.with_classification('laves').to_list() \
+ find_all.with_classification('non_edge').to_list() \
+ find_all.with_classification('non_convex').to_list()
# A long row of each tiling pattern, with color patterns underneath
offset = 15
column = 0
# Output meshes for potential inspection (e.g. test suite)
meshes = {}
for cls in classes:
key = cls.__name__
meshes[key] = {'color_patterns': {},
'extra_parameters': {}}
method = self.class_to_method(cls)
row = 0
# Non-color pattern object
meshes[key]['regular'] = method([column, 0, row])
for i in range(cls.num_color_patterns()):
color_pattern = i + 1
row -= offset
# Color pattern object
meshes[key]['color_patterns'][color_pattern] = \
method([column, 0, row], color_pattern=color_pattern)
for parameter in cls.metadata.extra_parameters:
parameter_info = cls.metadata.extra_parameters[parameter]
meshes[key]['extra_parameters'][parameter] = {}
if parameter_info['type'] == 'float':
values = dict(
low=(parameter_info['default'] + parameter_info['min']) / 2.0,
high=(parameter_info['default'] + parameter_info['max']) / 2.0)
for value_name in values:
value = values[value_name]
row -= offset
kwargs = {parameter: value}
meshes[key]['extra_parameters'][parameter][value_name] = \
method([column, 0, row], **kwargs)
column += offset
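        # meshes maps each tessagon class name to its plain mesh ('regular')
        # plus any color-pattern and extra-parameter ('low'/'high') variants.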
return meshes
def hex_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 45,
'v_num': 3,
'u_cyclic': True,
'v_cyclic': False,
'position': position
}
HexTessagon = self.method_to_class()
return self.tessellate(cylinder, HexTessagon,
**{**kwargs, **options})
def tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 36,
'v_num': 12,
'position': position
}
TriTessagon = self.method_to_class()
return self.tessellate(torus, TriTessagon,
**{**kwargs, **options})
def square_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 24,
'v_num': 6,
'rot_factor': 2,
'position': position
}
SquareTessagon = self.method_to_class()
return self.tessellate(torus, SquareTessagon,
**{**kwargs, **options})
def rhombus_klein(self, u, v):
(x, y, z) = klein(u, v)
return (x, z, -y)
def rhombus_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 40,
'v_num': 6,
'v_twist': True,
'position': position
}
RhombusTessagon = self.method_to_class()
return self.tessellate(self.rhombus_klein, RhombusTessagon,
**{**kwargs, **options})
def octo_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 4,
'v_num': 40,
'v_cyclic': True,
'u_cyclic': False,
'u_twist': True,
'position': position
}
OctoTessagon = self.method_to_class()
return self.tessellate(mobius, OctoTessagon,
**{**kwargs, **options})
def hex_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [-1.0, 1.0],
'v_range': [-1.0, 1.0],
'u_num': 15,
'v_num': 10,
'u_cyclic': False,
'v_cyclic': False,
'position': position
}
HexTriTessagon = self.method_to_class()
return self.tessellate(paraboloid, HexTriTessagon,
**{**kwargs, **options})
def hex_square_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 45,
'v_num': 5,
'position': position
}
HexSquareTriTessagon = self.method_to_class()
return self.tessellate(torus, HexSquareTriTessagon,
**{**kwargs, **options})
def pythagorean_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 25,
'v_num': 6,
'position': position
}
PythagoreanTessagon = self.method_to_class()
return self.tessellate(torus, PythagoreanTessagon,
**{**kwargs, **options})
def brick_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 15,
'v_num': 3,
'rot_factor': 3,
'position': position
}
BrickTessagon = self.method_to_class()
return self.tessellate(torus, BrickTessagon,
**{**kwargs, **options})
def dodeca_tessagon(self, position, **kwargs):
options = {
'u_range': [-1.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 4,
'v_num': 10,
'u_cyclic': False,
'v_cyclic': True,
'position': position
}
DodecaTessagon = self.method_to_class()
return self.tessellate(one_sheet_hyperboloid, DodecaTessagon,
**{**kwargs, **options})
def big_hex_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 56,
'v_num': 8,
'position': position
}
BigHexTriTessagon = self.method_to_class()
return self.tessellate(torus, BigHexTriTessagon,
**{**kwargs, **options})
def square_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 20,
'v_num': 4,
'position': position
}
SquareTriTessagon = self.method_to_class()
return self.tessellate(torus, SquareTriTessagon,
**{**kwargs, **options})
def weave_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 8,
'v_num': 6,
'v_cyclic': False,
'rot_factor': 1,
'position': position
}
WeaveTessagon = self.method_to_class()
return self.tessellate(sphere, WeaveTessagon,
**{**kwargs, **options})
def chubby_torus(self, u, v):
# u_cyclic = True, v_cyclic = True
r1 = 5.0
r2 = 1.5
return general_torus(r1, r2, v, warp_var(u, 0.2))
def floret_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 2,
'v_num': 12,
            'parallelogram_vectors': [[9, -1], [1, 3]],
'position': position
}
FloretTessagon = self.method_to_class()
return self.tessellate(self.chubby_torus, FloretTessagon,
**{**kwargs, **options})
def flip_axes_torus(self, u, v):
return torus(v, -u)
def hex_big_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 2,
'v_num': 5,
'parallelogram_vectors': [[5, 1], [-3, 5]],
'position': position
}
HexBigTriTessagon = self.method_to_class()
return self.tessellate(self.flip_axes_torus, HexBigTriTessagon,
**{**kwargs, **options})
def zig_zag_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 10,
'v_num': 2,
'rot_factor': 2,
'position': position
}
ZigZagTessagon = self.method_to_class()
return self.tessellate(torus, ZigZagTessagon,
**{**kwargs, **options})
def dissected_square_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 32,
'v_num': 4,
'u_cyclic': True,
'v_cyclic': False,
'position': position
}
DissectedSquareTessagon = self.method_to_class()
return self.tessellate(cylinder, DissectedSquareTessagon,
**{**kwargs, **options})
def square_tri2_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 35,
'v_num': 3,
'v_cyclic': False,
'position': position
}
SquareTri2Tessagon = self.method_to_class()
return self.tessellate(cylinder, SquareTri2Tessagon,
**{**kwargs, **options})
def dodeca_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [-1.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 3,
'v_num': 20,
'u_cyclic': False,
'v_cyclic': True,
'position': position
}
DodecaTriTessagon = self.method_to_class()
return self.tessellate(one_sheet_hyperboloid, DodecaTriTessagon,
**{**kwargs, **options})
def dissected_triangle2_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 10,
'v_num': 2,
'rot_factor': 2,
'position': position
}
        DissectedTriangle2Tessagon = self.method_to_class()
        return self.tessellate(torus, DissectedTriangle2Tessagon,
                               **{**kwargs, **options})
def dissected_triangle_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 16,
'v_num': 3,
'u_cyclic': True,
'v_cyclic': False,
'position': position
}
DissectedTriangleTessagon = self.method_to_class()
return self.tessellate(cylinder, DissectedTriangleTessagon,
**{**kwargs, **options})
def dissected_hex_quad_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 12,
'v_num': 24,
'u_cyclic': True,
'v_cyclic': True,
'position': position
}
DissectedHexQuadTessagon = self.method_to_class()
return self.tessellate(self.chubby_torus, DissectedHexQuadTessagon,
**{**kwargs, **options})
def dissected_hex_tri_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 10,
'v_num': 20,
'u_cyclic': True,
'v_cyclic': True,
'position': position
}
DissectedHexTriTessagon = self.method_to_class()
return self.tessellate(self.chubby_torus, DissectedHexTriTessagon,
**{**kwargs, **options})
def penta_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 20,
'v_num': 4,
'position': position
}
PentaTessagon = self.method_to_class()
return self.tessellate(torus, PentaTessagon,
**{**kwargs, **options})
def penta2_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 50,
'v_num': 4,
'position': position
}
Penta2Tessagon = self.method_to_class()
return self.tessellate(torus, Penta2Tessagon,
**{**kwargs, **options})
def stanley_park_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 25,
'v_num': 10,
'position': position
}
StanleyParkTessagon = self.method_to_class()
return self.tessellate(torus, StanleyParkTessagon,
**{**kwargs, **options})
def valemount_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 16,
'v_num': 4,
'rot_factor': 2,
'position': position
}
ValemountTessagon = self.method_to_class()
return self.tessellate(torus, ValemountTessagon,
**{**kwargs, **options})
def islamic_hex_stars_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 4,
'v_num': 1,
'rot_factor': 5,
'position': position
}
IslamicHexStarsTessagon = self.method_to_class()
return self.tessellate(torus, IslamicHexStarsTessagon,
**{**kwargs, **options})
def islamic_stars_crosses_tessagon(self, position, **kwargs):
options = {
'u_range': [-1.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 3,
'v_num': 15,
'rot_factor': 2,
'u_cyclic': False,
'v_cyclic': True,
'position': position
}
IslamicStarsCrossesTessagon = self.method_to_class()
return self.tessellate(one_sheet_hyperboloid, IslamicStarsCrossesTessagon,
**{**kwargs, **options})
def cloverdale_tessagon(self, position, **kwargs):
options = {
'u_range': [0.0, 1.0],
'v_range': [0.0, 1.0],
'u_num': 14,
'v_num': 14,
'v_cyclic': False,
'position': position
}
CloverdaleTessagon = self.method_to_class()
return self.tessellate(sphere, CloverdaleTessagon,
**{**kwargs, **options})
|
c17a39e406f08cf877736b581f1690cddcfd80bd
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/trafficmanager/azure-mgmt-trafficmanager/azure/mgmt/trafficmanager/operations/__init__.py
|
6528d2d2ddfc2c31d95a9d1bad83999c0f97371a
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
__init__.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._endpoints_operations import EndpointsOperations
from ._profiles_operations import ProfilesOperations
from ._geographic_hierarchies_operations import GeographicHierarchiesOperations
from ._heat_map_operations import HeatMapOperations
from ._traffic_manager_user_metrics_keys_operations import TrafficManagerUserMetricsKeysOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
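# The _patch module is the hook for hand-written customizations of this
# generated code; _patch_sdk() (called at the bottom) applies them on import.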
__all__ = [
"EndpointsOperations",
"ProfilesOperations",
"GeographicHierarchiesOperations",
"HeatMapOperations",
"TrafficManagerUserMetricsKeysOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()

| column | value |
|---|---|
| blob_id | 59f5f349d5772493ed5e763709e240c4beea04e9 |
| directory_id | b10e501b17337b685a2fef01f63d91c427bf62d6 |
| path | /raytracing/examples/sourceCollection.py |
| content_id | 72e10331e515596427e280ce9052ce9910012b90 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | DCC-Lab/RayTracing |
| snapshot_id | 5ebd1a982e390fab5397a6309aa832efe9c62d20 |
| revision_id | bbf715b9b9dc8317d3d5d5bd550a726f6908b3bb |
| branch_name | refs/heads/master |
| visit_date | 2023-04-08T16:41:59.211239 |
| revision_date | 2023-02-22T06:02:02 |
| committer_date | 2023-02-22T06:02:02 |
| github_id | 166,473,288 |
| star_events_count | 185 |
| fork_events_count | 38 |
| gha_license_id | MIT |
| gha_event_created_at | 2023-08-14T11:39:05 |
| gha_created_at | 2019-01-18T21:13:35 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 988 |
| extension | py |
| filename | sourceCollection.py |
| content | (file contents below) |

import envexamples
from raytracing import *
path = ImagingPath(label="Original setup")
path.append(Space(d=100))
path.append(Lens(f=35, diameter=25))
path.append(Space(d=35))
path.append(Aperture(diameter=0.2))
path.displayWithObject(diameter=1, removeBlocked=False)
path.reportEfficiency(objectDiameter=1, emissionHalfAngle=1.57)
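# emissionHalfAngle=1.57 rad is roughly pi/2, i.e. the efficiency is reported
# for a source emitting into a full hemisphere.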
path2 = ImagingPath(label="Better system imaging the emission spot on detector")
path2.append(Space(d=70))
path2.append(Lens(f=35, diameter=25))
path2.append(Space(d=70))
path2.append(Aperture(diameter=0.2))
path2.displayWithObject(diameter=1, removeBlocked=False)
path2.reportEfficiency(objectDiameter=1, emissionHalfAngle=1.57)
path3 = ImagingPath(label="Best system and larger NA imaging spot on detector")
path3.objectHeight = 1
path3.append(System4f(f1=35, diameter1=25, f2=35, diameter2=35))
path3.append(Aperture(diameter=0.2))
path3.displayWithObject(diameter=1, removeBlocked=False)
#path3.reportEfficiency(objectDiameter=1, emissionHalfAngle=1.57)

| column | value |
|---|---|
| blob_id | 44c57625cc2681cb49c2ecd01f65a5f761c63b9d |
| directory_id | 8ca19f1a31070738b376c0370c4bebf6b7efcb43 |
| path | /office365/sharepoint/fields/image_value.py |
| content_id | 8900222abac0b54b409ba517ea02c6d768b69d61 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | vgrem/Office365-REST-Python-Client |
| snapshot_id | 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 |
| revision_id | cbd245d1af8d69e013c469cfc2a9851f51c91417 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-02T14:20:40.109462 |
| revision_date | 2023-08-31T19:14:05 |
| committer_date | 2023-08-31T19:14:05 |
| github_id | 51,305,798 |
| star_events_count | 1,006 |
| fork_events_count | 326 |
| gha_license_id | MIT |
| gha_event_created_at | 2023-08-28T05:38:02 |
| gha_created_at | 2016-02-08T15:24:51 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 533 |
| extension | py |
| filename | image_value.py |
| content | (file contents below) |

from office365.runtime.client_value import ClientValue
class ImageFieldValue(ClientValue):
def __init__(self, server_relative_url=None):
"""
:param str server_relative_url:
"""
self.serverRelativeUrl = server_relative_url
self.type = "thumbnail",
self.fileName = None
self.nativeFile = {}
self.fieldName = "Image"
self.serverUrl = None
self.fieldId = None
self.id = None
@property
def entity_type_name(self):
return None
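# Example usage (hypothetical):
#   value = ImageFieldValue(server_relative_url="/sites/team/SiteAssets/pic.jpg")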

| column | value |
|---|---|
| blob_id | 129be1a69d1c1d84ee672b8335cf892544d032ca |
| directory_id | 96dcea595e7c16cec07b3f649afd65f3660a0bad |
| path | /homeassistant/components/android_ip_webcam/binary_sensor.py |
| content_id | 6f17616a216bb08db79399b5c5393f6e5a2e3dec |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | home-assistant/core |
| snapshot_id | 3455eac2e9d925c92d30178643b1aaccf3a6484f |
| revision_id | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 |
| branch_name | refs/heads/dev |
| visit_date | 2023-08-31T15:41:06.299469 |
| revision_date | 2023-08-31T14:50:53 |
| committer_date | 2023-08-31T14:50:53 |
| github_id | 12,888,993 |
| star_events_count | 35,501 |
| fork_events_count | 20,617 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T21:50:15 |
| gha_created_at | 2013-09-17T07:29:48 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,911 |
| extension | py |
| filename | binary_sensor.py |
| content | (file contents below) |

"""Support for Android IP Webcam binary sensors."""
from __future__ import annotations
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN, MOTION_ACTIVE
from .coordinator import AndroidIPCamDataUpdateCoordinator
from .entity import AndroidIPCamBaseEntity
BINARY_SENSOR_DESCRIPTION = BinarySensorEntityDescription(
key="motion_active",
name="Motion active",
device_class=BinarySensorDeviceClass.MOTION,
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the IP Webcam sensors from config entry."""
coordinator: AndroidIPCamDataUpdateCoordinator = hass.data[DOMAIN][
config_entry.entry_id
]
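    # One motion binary sensor is created per config entry; its availability
    # also requires MOTION_ACTIVE among the camera's enabled sensors (see below).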
async_add_entities([IPWebcamBinarySensor(coordinator)])
class IPWebcamBinarySensor(AndroidIPCamBaseEntity, BinarySensorEntity):
"""Representation of an IP Webcam binary sensor."""
def __init__(
self,
coordinator: AndroidIPCamDataUpdateCoordinator,
) -> None:
"""Initialize the binary sensor."""
self.entity_description = BINARY_SENSOR_DESCRIPTION
self._attr_unique_id = (
f"{coordinator.config_entry.entry_id}-{BINARY_SENSOR_DESCRIPTION.key}"
)
super().__init__(coordinator)
@property
def available(self) -> bool:
"""Return avaibility if setting is enabled."""
return MOTION_ACTIVE in self.cam.enabled_sensors and super().available
@property
def is_on(self) -> bool:
"""Return if motion is detected."""
return self.cam.get_sensor_value(MOTION_ACTIVE) == 1.0

| column | value |
|---|---|
| blob_id | 5d31bf1918df231facee10e50c01c22cb5958f88 |
| directory_id | 318b737f3fe69171f706d2d990c818090ee6afce |
| path | /demo/predict-taxi-trip-duration/script/convert_data.py |
| content_id | 7808dadc689af878ac7ed6cf3425076ea63117df |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | 4paradigm/OpenMLDB |
| snapshot_id | e884c33f62177a70749749bd3b67e401c135f645 |
| revision_id | a013ba33e4ce131353edc71e27053b1801ffb8f7 |
| branch_name | refs/heads/main |
| visit_date | 2023-09-01T02:15:28.821235 |
| revision_date | 2023-08-31T11:42:02 |
| committer_date | 2023-08-31T11:42:02 |
| github_id | 346,976,717 |
| star_events_count | 3,323 |
| fork_events_count | 699 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T09:55:44 |
| gha_created_at | 2021-03-12T07:18:31 |
| gha_language | C++ |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,024 |
| extension | py |
| filename | convert_data.py |
| content | (file contents below) |

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module of covert data from system stdin"""
import sys
import time
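# The first line is the CSV header and is echoed unchanged; for every other
# row, columns 2 and 3 are rewritten from "%Y-%m-%d %H:%M:%S" to epoch
# milliseconds. time.mktime interprets the value in the local timezone, e.g.
# "2021-03-12 07:18:31" -> "1615533511000" when running under UTC.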
i = 0
for line in sys.stdin:
if i == 0:
i += 1
print(line.strip())
continue
arr = line.strip().split(",")
arr[2] = str(int(time.mktime(time.strptime(arr[2], "%Y-%m-%d %H:%M:%S"))) * 1000)
arr[3] = str(int(time.mktime(time.strptime(arr[3], "%Y-%m-%d %H:%M:%S"))) * 1000)
print(",".join(arr))

| column | value |
|---|---|
| blob_id | 4ecaac67e82bdc440e758980347b64d89981035f |
| directory_id | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 |
| path | /python/dgl/distributed/partition.py |
| content_id | da0ab445690eb8497c4633f59022fd91f335827e |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | dmlc/dgl |
| snapshot_id | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 |
| revision_id | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-31T16:33:21.139163 |
| revision_date | 2023-08-31T07:49:22 |
| committer_date | 2023-08-31T07:49:22 |
| github_id | 130,375,797 |
| star_events_count | 12,631 |
| fork_events_count | 3,482 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T15:48:24 |
| gha_created_at | 2018-04-20T14:49:09 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 51,241 |
| extension | py |
| filename | partition.py |
| content | (file contents below) |

"""Functions for partitions. """
import json
import logging
import os
import time
import numpy as np
from .. import backend as F
from ..base import DGLError, EID, ETYPE, NID, NTYPE
from ..convert import to_homogeneous
from ..data.utils import load_graphs, load_tensors, save_graphs, save_tensors
from ..partition import (
get_peak_mem,
metis_partition_assignment,
partition_graph_with_halo,
)
from ..random import choice as random_choice
from ..transforms import sort_csc_by_tag, sort_csr_by_tag
from .constants import DEFAULT_ETYPE, DEFAULT_NTYPE
from .graph_partition_book import (
_etype_str_to_tuple,
_etype_tuple_to_str,
RangePartitionBook,
)
RESERVED_FIELD_DTYPE = {
"inner_node": F.uint8, # A flag indicates whether the node is inside a partition.
"inner_edge": F.uint8, # A flag indicates whether the edge is inside a partition.
NID: F.int64,
EID: F.int64,
NTYPE: F.int16,
# `sort_csr_by_tag` and `sort_csc_by_tag` works on int32/64 only.
ETYPE: F.int32,
}
def _format_part_metadata(part_metadata, formatter):
"""Format etypes with specified formatter."""
for key in ["edge_map", "etypes"]:
if key not in part_metadata:
continue
orig_data = part_metadata[key]
if not isinstance(orig_data, dict):
continue
new_data = {}
for etype, data in orig_data.items():
etype = formatter(etype)
new_data[etype] = data
part_metadata[key] = new_data
return part_metadata
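# For example, _dump_part_config writes the canonical edge type
# ('_N', '_E', '_N') as the string '_N:_E:_N', and _load_part_config parses
# the string back into the tuple.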
def _load_part_config(part_config):
"""Load part config and format."""
try:
with open(part_config) as f:
part_metadata = _format_part_metadata(
json.load(f), _etype_str_to_tuple
)
except AssertionError as e:
raise DGLError(
f"Failed to load partition config due to {e}. "
"Probably caused by outdated config. If so, please refer to "
"https://github.com/dmlc/dgl/tree/master/tools#change-edge-"
"type-to-canonical-edge-type-for-partition-configuration-json"
)
return part_metadata
def _dump_part_config(part_config, part_metadata):
"""Format and dump part config."""
part_metadata = _format_part_metadata(part_metadata, _etype_tuple_to_str)
with open(part_config, "w") as outfile:
json.dump(part_metadata, outfile, sort_keys=False, indent=4)
def _save_graphs(filename, g_list, formats=None, sort_etypes=False):
"""Preprocess partitions before saving:
1. format data types.
2. sort csc/csr by tag.
"""
for g in g_list:
for k, dtype in RESERVED_FIELD_DTYPE.items():
if k in g.ndata:
g.ndata[k] = F.astype(g.ndata[k], dtype)
if k in g.edata:
g.edata[k] = F.astype(g.edata[k], dtype)
for g in g_list:
if (not sort_etypes) or (formats is None):
continue
if "csr" in formats:
g = sort_csr_by_tag(g, tag=g.edata[ETYPE], tag_type="edge")
if "csc" in formats:
g = sort_csc_by_tag(g, tag=g.edata[ETYPE], tag_type="edge")
save_graphs(filename, g_list, formats=formats)
def _get_inner_node_mask(graph, ntype_id):
if NTYPE in graph.ndata:
dtype = F.dtype(graph.ndata["inner_node"])
return (
graph.ndata["inner_node"]
* F.astype(graph.ndata[NTYPE] == ntype_id, dtype)
== 1
)
else:
return graph.ndata["inner_node"] == 1
def _get_inner_edge_mask(graph, etype_id):
if ETYPE in graph.edata:
dtype = F.dtype(graph.edata["inner_edge"])
return (
graph.edata["inner_edge"]
* F.astype(graph.edata[ETYPE] == etype_id, dtype)
== 1
)
else:
return graph.edata["inner_edge"] == 1
def _get_part_ranges(id_ranges):
res = {}
for key in id_ranges:
# Normally, each element has two values that represent the starting ID and the ending ID
# of the ID range in a partition.
# If not, the data is probably still in the old format, in which only the ending ID is
# stored. We need to convert it to the format we expect.
if not isinstance(id_ranges[key][0], list):
start = 0
for i, end in enumerate(id_ranges[key]):
id_ranges[key][i] = [start, end]
start = end
res[key] = np.concatenate(
[np.array(l) for l in id_ranges[key]]
).reshape(-1, 2)
return res
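# E.g., an old-format entry [100, 250] becomes [[0, 100], [100, 250]], and each
# value ends up as a (num_parts, 2) array of [start, end) ranges per type.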
def load_partition(part_config, part_id, load_feats=True):
"""Load data of a partition from the data path.
    The data of a partition includes the graph structure of the partition, a dict of node
    tensors, a dict of edge tensors, and some metadata. The partition may contain HALO
    nodes, which are nodes replicated from other partitions. However, the dict of node
    tensors only contains the node data that belongs to the local partition. Similarly,
    the dict of edge tensors only contains the edge data that belongs to the local
    partition. The metadata describes the global graph (not the local partition): the
    number of nodes, the number of edges, and the node assignment of the global graph.
    The function currently loads data through the local filesystem interface.
Parameters
----------
part_config : str
The path of the partition config file.
part_id : int
The partition ID.
load_feats : bool, optional
Whether to load node/edge feats. If False, the returned node/edge feature
dictionaries will be empty. Default: True.
Returns
-------
DGLGraph
The graph partition structure.
Dict[str, Tensor]
Node features.
Dict[(str, str, str), Tensor]
Edge features.
GraphPartitionBook
The graph partition information.
str
The graph name
List[str]
The node types
List[(str, str, str)]
The edge types
"""
config_path = os.path.dirname(part_config)
relative_to_config = lambda path: os.path.join(config_path, path)
with open(part_config) as conf_f:
part_metadata = json.load(conf_f)
assert (
"part-{}".format(part_id) in part_metadata
), "part-{} does not exist".format(part_id)
part_files = part_metadata["part-{}".format(part_id)]
assert (
"part_graph" in part_files
), "the partition does not contain graph structure."
partition_path = relative_to_config(part_files["part_graph"])
logging.info(
"Start to load partition from %s which is "
"%d bytes. It may take non-trivial "
"time for large partition.",
partition_path,
os.path.getsize(partition_path),
)
graph = load_graphs(partition_path)[0][0]
logging.info("Finished loading partition.")
assert (
NID in graph.ndata
), "the partition graph should contain node mapping to global node ID"
assert (
EID in graph.edata
), "the partition graph should contain edge mapping to global edge ID"
gpb, graph_name, ntypes, etypes = load_partition_book(part_config, part_id)
ntypes_list = list(ntypes.keys())
etypes_list = list(etypes.keys())
if "DGL_DIST_DEBUG" in os.environ:
for ntype in ntypes:
ntype_id = ntypes[ntype]
# graph.ndata[NID] are global homogeneous node IDs.
nids = F.boolean_mask(
graph.ndata[NID], _get_inner_node_mask(graph, ntype_id)
)
partids1 = gpb.nid2partid(nids)
_, per_type_nids = gpb.map_to_per_ntype(nids)
partids2 = gpb.nid2partid(per_type_nids, ntype)
assert np.all(F.asnumpy(partids1 == part_id)), (
"Unexpected partition IDs are found in the loaded partition "
"while querying via global homogeneous node IDs."
)
assert np.all(F.asnumpy(partids2 == part_id)), (
"Unexpected partition IDs are found in the loaded partition "
"while querying via type-wise node IDs."
)
for etype in etypes:
etype_id = etypes[etype]
# graph.edata[EID] are global homogeneous edge IDs.
eids = F.boolean_mask(
graph.edata[EID], _get_inner_edge_mask(graph, etype_id)
)
partids1 = gpb.eid2partid(eids)
_, per_type_eids = gpb.map_to_per_etype(eids)
partids2 = gpb.eid2partid(per_type_eids, etype)
assert np.all(F.asnumpy(partids1 == part_id)), (
"Unexpected partition IDs are found in the loaded partition "
"while querying via global homogeneous edge IDs."
)
assert np.all(F.asnumpy(partids2 == part_id)), (
"Unexpected partition IDs are found in the loaded partition "
"while querying via type-wise edge IDs."
)
node_feats = {}
edge_feats = {}
if load_feats:
node_feats, edge_feats = load_partition_feats(part_config, part_id)
return (
graph,
node_feats,
edge_feats,
gpb,
graph_name,
ntypes_list,
etypes_list,
)
def load_partition_feats(
part_config, part_id, load_nodes=True, load_edges=True
):
"""Load node/edge feature data from a partition.
Parameters
----------
part_config : str
The path of the partition config file.
part_id : int
The partition ID.
load_nodes : bool, optional
Whether to load node features. If ``False``, ``None`` is returned.
load_edges : bool, optional
Whether to load edge features. If ``False``, ``None`` is returned.
Returns
-------
Dict[str, Tensor] or None
Node features.
Dict[str, Tensor] or None
Edge features.
"""
config_path = os.path.dirname(part_config)
relative_to_config = lambda path: os.path.join(config_path, path)
with open(part_config) as conf_f:
part_metadata = json.load(conf_f)
assert (
"part-{}".format(part_id) in part_metadata
), "part-{} does not exist".format(part_id)
part_files = part_metadata["part-{}".format(part_id)]
assert (
"node_feats" in part_files
), "the partition does not contain node features."
assert (
"edge_feats" in part_files
), "the partition does not contain edge feature."
node_feats = None
if load_nodes:
feat_path = relative_to_config(part_files["node_feats"])
logging.debug(
"Start to load node data from %s which is " "%d bytes.",
feat_path,
os.path.getsize(feat_path),
)
node_feats = load_tensors(feat_path)
logging.info("Finished loading node data.")
edge_feats = None
if load_edges:
feat_path = relative_to_config(part_files["edge_feats"])
logging.debug(
"Start to load edge data from %s which is " "%d bytes.",
feat_path,
os.path.getsize(feat_path),
)
edge_feats = load_tensors(feat_path)
logging.info("Finished loading edge data.")
# In the old format, the feature name doesn't contain node/edge type.
# For compatibility, let's add node/edge types to the feature names.
if node_feats is not None:
new_feats = {}
for name in node_feats:
feat = node_feats[name]
if name.find("/") == -1:
name = DEFAULT_NTYPE + "/" + name
new_feats[name] = feat
node_feats = new_feats
if edge_feats is not None:
new_feats = {}
for name in edge_feats:
feat = edge_feats[name]
if name.find("/") == -1:
name = _etype_tuple_to_str(DEFAULT_ETYPE) + "/" + name
new_feats[name] = feat
edge_feats = new_feats
return node_feats, edge_feats
def load_partition_book(part_config, part_id):
"""Load a graph partition book from the partition config file.
Parameters
----------
part_config : str
The path of the partition config file.
part_id : int
The partition ID.
Returns
-------
GraphPartitionBook
The global partition information.
str
The graph name
dict
The node types
dict
The edge types
"""
part_metadata = _load_part_config(part_config)
assert "num_parts" in part_metadata, "num_parts does not exist."
assert (
part_metadata["num_parts"] > part_id
), "part {} is out of range (#parts: {})".format(
part_id, part_metadata["num_parts"]
)
num_parts = part_metadata["num_parts"]
assert (
"num_nodes" in part_metadata
), "cannot get the number of nodes of the global graph."
assert (
"num_edges" in part_metadata
), "cannot get the number of edges of the global graph."
assert "node_map" in part_metadata, "cannot get the node map."
assert "edge_map" in part_metadata, "cannot get the edge map."
assert "graph_name" in part_metadata, "cannot get the graph name"
# If this is a range partitioning, node_map actually stores a list, whose elements
    # indicate the boundary of range partitioning. Otherwise, node_map stores a filename
    # that contains the node map in a NumPy array.
node_map = part_metadata["node_map"]
edge_map = part_metadata["edge_map"]
if isinstance(node_map, dict):
for key in node_map:
is_range_part = isinstance(node_map[key], list)
break
elif isinstance(node_map, list):
is_range_part = True
node_map = {DEFAULT_NTYPE: node_map}
else:
is_range_part = False
if isinstance(edge_map, list):
edge_map = {DEFAULT_ETYPE: edge_map}
ntypes = {DEFAULT_NTYPE: 0}
etypes = {DEFAULT_ETYPE: 0}
if "ntypes" in part_metadata:
ntypes = part_metadata["ntypes"]
if "etypes" in part_metadata:
etypes = part_metadata["etypes"]
if isinstance(node_map, dict):
for key in node_map:
assert key in ntypes, "The node type {} is invalid".format(key)
if isinstance(edge_map, dict):
for key in edge_map:
assert key in etypes, "The edge type {} is invalid".format(key)
if not is_range_part:
raise TypeError("Only RangePartitionBook is supported currently.")
node_map = _get_part_ranges(node_map)
edge_map = _get_part_ranges(edge_map)
# Sort the node/edge maps by the node/edge type ID.
node_map = dict(sorted(node_map.items(), key=lambda x: ntypes[x[0]]))
edge_map = dict(sorted(edge_map.items(), key=lambda x: etypes[x[0]]))
def _assert_is_sorted(id_map):
id_ranges = np.array(list(id_map.values()))
ids = []
for i in range(num_parts):
ids.append(id_ranges[:, i, :])
ids = np.array(ids).flatten()
assert np.all(
ids[:-1] <= ids[1:]
), f"The node/edge map is not sorted: {ids}"
_assert_is_sorted(node_map)
_assert_is_sorted(edge_map)
return (
RangePartitionBook(
part_id, num_parts, node_map, edge_map, ntypes, etypes
),
part_metadata["graph_name"],
ntypes,
etypes,
)
def _get_orig_ids(g, sim_g, orig_nids, orig_eids):
"""Convert/construct the original node IDs and edge IDs.
It handles multiple cases:
* If the graph has been reshuffled and it's a homogeneous graph, we just return
the original node IDs and edge IDs in the inputs.
* If the graph has been reshuffled and it's a heterogeneous graph, we need to
split the original node IDs and edge IDs in the inputs based on the node types
and edge types.
* If the graph is not shuffled, the original node IDs and edge IDs don't change.
Parameters
----------
g : DGLGraph
The input graph for partitioning.
sim_g : DGLGraph
The homogeneous version of the input graph.
orig_nids : tensor or None
The original node IDs after the input graph is reshuffled.
orig_eids : tensor or None
The original edge IDs after the input graph is reshuffled.
Returns
-------
tensor or dict of tensors, tensor or dict of tensors
"""
is_hetero = not g.is_homogeneous
if is_hetero:
# Get the type IDs
orig_ntype = F.gather_row(sim_g.ndata[NTYPE], orig_nids)
orig_etype = F.gather_row(sim_g.edata[ETYPE], orig_eids)
# Mapping between shuffled global IDs to original per-type IDs
orig_nids = F.gather_row(sim_g.ndata[NID], orig_nids)
orig_eids = F.gather_row(sim_g.edata[EID], orig_eids)
orig_nids = {
ntype: F.boolean_mask(
orig_nids, orig_ntype == g.get_ntype_id(ntype)
)
for ntype in g.ntypes
}
orig_eids = {
etype: F.boolean_mask(
orig_eids, orig_etype == g.get_etype_id(etype)
)
for etype in g.canonical_etypes
}
return orig_nids, orig_eids
def _set_trainer_ids(g, sim_g, node_parts):
"""Set the trainer IDs for each node and edge on the input graph.
The trainer IDs will be stored as node data and edge data in the input graph.
Parameters
----------
g : DGLGraph
The input graph for partitioning.
sim_g : DGLGraph
The homogeneous version of the input graph.
node_parts : tensor
The node partition ID for each node in `sim_g`.
"""
if g.is_homogeneous:
g.ndata["trainer_id"] = node_parts
# An edge is assigned to a partition based on its destination node.
g.edata["trainer_id"] = F.gather_row(node_parts, g.edges()[1])
else:
for ntype_id, ntype in enumerate(g.ntypes):
type_idx = sim_g.ndata[NTYPE] == ntype_id
orig_nid = F.boolean_mask(sim_g.ndata[NID], type_idx)
trainer_id = F.zeros((len(orig_nid),), F.dtype(node_parts), F.cpu())
F.scatter_row_inplace(
trainer_id, orig_nid, F.boolean_mask(node_parts, type_idx)
)
g.nodes[ntype].data["trainer_id"] = trainer_id
for c_etype in g.canonical_etypes:
# An edge is assigned to a partition based on its destination node.
_, _, dst_type = c_etype
trainer_id = F.gather_row(
g.nodes[dst_type].data["trainer_id"], g.edges(etype=c_etype)[1]
)
g.edges[c_etype].data["trainer_id"] = trainer_id
def partition_graph(
g,
graph_name,
num_parts,
out_path,
num_hops=1,
part_method="metis",
balance_ntypes=None,
balance_edges=False,
return_mapping=False,
num_trainers_per_machine=1,
objtype="cut",
graph_formats=None,
):
"""Partition a graph for distributed training and store the partitions on files.
The partitioning occurs in three steps: 1) run a partition algorithm (e.g., Metis) to
assign nodes to partitions; 2) construct partition graph structure based on
the node assignment; 3) split the node features and edge features based on
the partition result.
When a graph is partitioned, each partition can contain *HALO* nodes, which are assigned
    to other partitions but are included in this partition for efficiency purposes.
    In this document, *local nodes/edges* refer to the nodes and edges that truly belong to
a partition. The rest are "HALO nodes/edges".
The partitioned data is stored into multiple files organized as follows:
.. code-block:: none
data_root_dir/
|-- graph_name.json # partition configuration file in JSON
|-- node_map.npy # partition id of each node stored in a numpy array (optional)
|-- edge_map.npy # partition id of each edge stored in a numpy array (optional)
|-- part0/ # data for partition 0
|-- node_feats.dgl # node features stored in binary format
|-- edge_feats.dgl # edge features stored in binary format
|-- graph.dgl # graph structure of this partition stored in binary format
|-- part1/ # data for partition 1
|-- node_feats.dgl
|-- edge_feats.dgl
|-- graph.dgl
First, the metadata of the original graph and the partitioning is stored in a JSON file
named after ``graph_name``. This JSON file contains the information of the original graph
    as well as the paths of the files that store each partition. An example is shown below.
.. code-block:: none
{
"graph_name" : "test",
"part_method" : "metis",
"num_parts" : 2,
"halo_hops" : 1,
"node_map": {
"_N": [ [ 0, 1261310 ],
[ 1261310, 2449029 ] ]
},
"edge_map": {
"_N:_E:_N": [ [ 0, 62539528 ],
[ 62539528, 123718280 ] ]
},
"etypes": { "_N:_E:_N": 0 },
"ntypes": { "_N": 0 },
"num_nodes" : 1000000,
"num_edges" : 52000000,
"part-0" : {
"node_feats" : "data_root_dir/part0/node_feats.dgl",
"edge_feats" : "data_root_dir/part0/edge_feats.dgl",
"part_graph" : "data_root_dir/part0/graph.dgl",
},
"part-1" : {
"node_feats" : "data_root_dir/part1/node_feats.dgl",
"edge_feats" : "data_root_dir/part1/edge_feats.dgl",
"part_graph" : "data_root_dir/part1/graph.dgl",
},
}
    Here are the definitions of the fields in the partition configuration file:
* ``graph_name`` is the name of the graph given by a user.
* ``part_method`` is the method used to assign nodes to partitions.
Currently, it supports "random" and "metis".
* ``num_parts`` is the number of partitions.
* ``halo_hops`` is the number of hops of nodes we include in a partition as HALO nodes.
* ``node_map`` is the node assignment map, which tells the partition ID a node is assigned to.
The format of ``node_map`` is described below.
* ``edge_map`` is the edge assignment map, which tells the partition ID an edge is assigned to.
* ``num_nodes`` is the number of nodes in the global graph.
* ``num_edges`` is the number of edges in the global graph.
* `part-*` stores the data of a partition.
    As node/edge IDs are reshuffled, ``node_map`` and ``edge_map`` contain the information
    for mapping global node/edge IDs to partition-local node/edge IDs.
For heterogeneous graphs, the information in ``node_map`` and ``edge_map`` can also be used
to compute node types and edge types. The format of the data in ``node_map`` and ``edge_map``
is as follows:
.. code-block:: none
{
"node_type": [ [ part1_start, part1_end ],
[ part2_start, part2_end ],
... ],
...
},
Essentially, ``node_map`` and ``edge_map`` are dictionaries. The keys are
    node types and canonical edge types, respectively. The values are lists of pairs
containing the start and end of the ID range for the corresponding types in a partition.
The length of the list is the number of
partitions; each element in the list is a tuple that stores the start and the end of
an ID range for a particular node/edge type in the partition.
The graph structure of a partition is stored in a file with the DGLGraph format.
    Nodes in each partition are *relabeled* to always start with zero. We call the node
ID in the original graph, *global ID*, while the relabeled ID in each partition,
*local ID*. Each partition graph has an integer node data tensor stored under name
`dgl.NID` and each value is the node's global ID. Similarly, edges are relabeled too
and the mapping from local ID to global ID is stored as an integer edge data tensor
under name `dgl.EID`. For a heterogeneous graph, the DGLGraph also contains a node
data `dgl.NTYPE` for node type and an edge data `dgl.ETYPE` for the edge type.
The partition graph contains additional node data ("inner_node") and
edge data ("inner_edge"):
* "inner_node" indicates whether a node belongs to a partition.
* "inner_edge" indicates whether an edge belongs to a partition.
    Node and edge features are split and stored together with each graph partition.
All node/edge features in a partition are stored in a file with DGL format. The node/edge
features are stored in dictionaries, in which the key is the node/edge data name and
the value is a tensor. We do not store features of HALO nodes and edges.
    When performing Metis partitioning, we can put some constraints on the partitioning.
    Currently, it supports two constraints to balance the partitioning. By default, Metis
always tries to balance the number of nodes in each partition.
* ``balance_ntypes`` balances the number of nodes of different types in each partition.
* ``balance_edges`` balances the number of edges in each partition.
To balance the node types, a user needs to pass a vector of N elements to indicate
the type of each node. N is the number of nodes in the input graph.
Parameters
----------
g : DGLGraph
The input graph to partition
graph_name : str
The name of the graph. The name will be used to construct
:py:meth:`~dgl.distributed.DistGraph`.
num_parts : int
The number of partitions
out_path : str
The path to store the files for all partitioned data.
num_hops : int, optional
The number of hops of HALO nodes we construct on a partition graph structure.
The default value is 1.
part_method : str, optional
The partition method. It supports "random" and "metis". The default value is "metis".
balance_ntypes : tensor, optional
        Node type of each node. This is a 1D array of integers. Its values indicate the node
type of each node. This argument is used by Metis partition. When the argument is
specified, the Metis algorithm will try to partition the input graph into partitions where
each partition has roughly the same number of nodes for each node type. The default value
is None, which means Metis partitions the graph to only balance the number of nodes.
balance_edges : bool
Indicate whether to balance the edges in each partition. This argument is used by
the Metis algorithm.
return_mapping : bool
Indicate whether to return the mapping between shuffled node/edge IDs and the original
node/edge IDs.
num_trainers_per_machine : int, optional
        The number of trainers per machine. If it is not 1, the whole graph will first be
        partitioned across all trainers, i.e., into num_parts * num_trainers_per_machine
        parts, with the trainer ID of each node stored in the node feature 'trainer_id'.
        The partitions of trainers on the same machine are then coalesced into one larger
        partition. The final number of partitions is `num_parts`.
objtype : str, "cut" or "vol"
Set the objective as edge-cut minimization or communication volume minimization. This
argument is used by the Metis algorithm.
graph_formats : str or list[str]
Save partitions in specified formats. It could be any combination of ``coo``,
``csc`` and ``csr``. If not specified, save one format only according to what
format is available. If multiple formats are available, selection priority
from high to low is ``coo``, ``csc``, ``csr``.
Returns
-------
Tensor or dict of tensors, optional
If `return_mapping=True`, return a 1D tensor that indicates the mapping between shuffled
node IDs and the original node IDs for a homogeneous graph; return a dict of 1D tensors
whose key is the node type and value is a 1D tensor mapping between shuffled node IDs and
the original node IDs for each node type for a heterogeneous graph.
Tensor or dict of tensors, optional
If `return_mapping=True`, return a 1D tensor that indicates the mapping between shuffled
edge IDs and the original edge IDs for a homogeneous graph; return a dict of 1D tensors
whose key is the edge type and value is a 1D tensor mapping between shuffled edge IDs and
the original edge IDs for each edge type for a heterogeneous graph.
Examples
--------
>>> dgl.distributed.partition_graph(g, 'test', 4, num_hops=1, part_method='metis',
... out_path='output/',
... balance_ntypes=g.ndata['train_mask'],
... balance_edges=True)
>>> (
... g, node_feats, edge_feats, gpb, graph_name, ntypes_list, etypes_list,
... ) = dgl.distributed.load_partition('output/test.json', 0)
"""
# 'coo' is required for partition
assert "coo" in np.concatenate(
list(g.formats().values())
), "'coo' format should be allowed for partitioning graph."
def get_homogeneous(g, balance_ntypes):
if g.is_homogeneous:
sim_g = to_homogeneous(g)
if isinstance(balance_ntypes, dict):
assert len(balance_ntypes) == 1
bal_ntypes = list(balance_ntypes.values())[0]
else:
bal_ntypes = balance_ntypes
elif isinstance(balance_ntypes, dict):
# Here we assign node types for load balancing.
            # The new node types include the ones provided by users.
num_ntypes = 0
for key in g.ntypes:
if key in balance_ntypes:
g.nodes[key].data["bal_ntype"] = (
F.astype(balance_ntypes[key], F.int32) + num_ntypes
)
uniq_ntypes = F.unique(balance_ntypes[key])
assert np.all(
F.asnumpy(uniq_ntypes) == np.arange(len(uniq_ntypes))
)
num_ntypes += len(uniq_ntypes)
else:
g.nodes[key].data["bal_ntype"] = (
F.ones((g.num_nodes(key),), F.int32, F.cpu())
* num_ntypes
)
num_ntypes += 1
sim_g = to_homogeneous(g, ndata=["bal_ntype"])
bal_ntypes = sim_g.ndata["bal_ntype"]
print(
"The graph has {} node types and balance among {} types".format(
len(g.ntypes), len(F.unique(bal_ntypes))
)
)
# We now no longer need them.
for key in g.ntypes:
del g.nodes[key].data["bal_ntype"]
del sim_g.ndata["bal_ntype"]
else:
sim_g = to_homogeneous(g)
bal_ntypes = sim_g.ndata[NTYPE]
return sim_g, bal_ntypes
    if objtype not in ["cut", "vol"]:
        raise ValueError("objtype must be 'cut' or 'vol', got: " + objtype)
if num_parts == 1:
start = time.time()
sim_g, balance_ntypes = get_homogeneous(g, balance_ntypes)
print(
"Converting to homogeneous graph takes {:.3f}s, peak mem: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
assert num_trainers_per_machine >= 1
if num_trainers_per_machine > 1:
# First partition the whole graph to each trainer and save the trainer ids in
# the node feature "trainer_id".
start = time.time()
node_parts = metis_partition_assignment(
sim_g,
num_parts * num_trainers_per_machine,
balance_ntypes=balance_ntypes,
balance_edges=balance_edges,
mode="k-way",
)
_set_trainer_ids(g, sim_g, node_parts)
print(
"Assigning nodes to METIS partitions takes {:.3f}s, peak mem: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
node_parts = F.zeros((sim_g.num_nodes(),), F.int64, F.cpu())
parts = {0: sim_g.clone()}
orig_nids = parts[0].ndata[NID] = F.arange(0, sim_g.num_nodes())
orig_eids = parts[0].edata[EID] = F.arange(0, sim_g.num_edges())
# For one partition, we don't really shuffle nodes and edges. We just need to simulate
# it and set node data and edge data of orig_id.
parts[0].ndata["orig_id"] = orig_nids
parts[0].edata["orig_id"] = orig_eids
if return_mapping:
if g.is_homogeneous:
orig_nids = F.arange(0, sim_g.num_nodes())
orig_eids = F.arange(0, sim_g.num_edges())
else:
orig_nids = {
ntype: F.arange(0, g.num_nodes(ntype)) for ntype in g.ntypes
}
orig_eids = {
etype: F.arange(0, g.num_edges(etype))
for etype in g.canonical_etypes
}
parts[0].ndata["inner_node"] = F.ones(
(sim_g.num_nodes(),),
RESERVED_FIELD_DTYPE["inner_node"],
F.cpu(),
)
parts[0].edata["inner_edge"] = F.ones(
(sim_g.num_edges(),),
RESERVED_FIELD_DTYPE["inner_edge"],
F.cpu(),
)
elif part_method in ("metis", "random"):
start = time.time()
sim_g, balance_ntypes = get_homogeneous(g, balance_ntypes)
print(
"Converting to homogeneous graph takes {:.3f}s, peak mem: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
if part_method == "metis":
assert num_trainers_per_machine >= 1
start = time.time()
if num_trainers_per_machine > 1:
# First partition the whole graph to each trainer and save the trainer ids in
# the node feature "trainer_id".
node_parts = metis_partition_assignment(
sim_g,
num_parts * num_trainers_per_machine,
balance_ntypes=balance_ntypes,
balance_edges=balance_edges,
mode="k-way",
objtype=objtype,
)
_set_trainer_ids(g, sim_g, node_parts)
# And then coalesce the partitions of trainers on the same machine into one
# larger partition.
node_parts = F.floor_div(node_parts, num_trainers_per_machine)
else:
node_parts = metis_partition_assignment(
sim_g,
num_parts,
balance_ntypes=balance_ntypes,
balance_edges=balance_edges,
objtype=objtype,
)
print(
"Assigning nodes to METIS partitions takes {:.3f}s, peak mem: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
else:
node_parts = random_choice(num_parts, sim_g.num_nodes())
start = time.time()
parts, orig_nids, orig_eids = partition_graph_with_halo(
sim_g, node_parts, num_hops, reshuffle=True
)
print(
"Splitting the graph into partitions takes {:.3f}s, peak mem: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
if return_mapping:
orig_nids, orig_eids = _get_orig_ids(g, sim_g, orig_nids, orig_eids)
else:
raise Exception("Unknown partitioning method: " + part_method)
# If the input is a heterogeneous graph, get the original node types and original node IDs.
    # `part` has three types of node data at this point.
# NTYPE: the node type.
# orig_id: the global node IDs in the homogeneous version of input graph.
# NID: the global node IDs in the reshuffled homogeneous version of the input graph.
if not g.is_homogeneous:
for name in parts:
orig_ids = parts[name].ndata["orig_id"]
ntype = F.gather_row(sim_g.ndata[NTYPE], orig_ids)
parts[name].ndata[NTYPE] = F.astype(
ntype, RESERVED_FIELD_DTYPE[NTYPE]
)
assert np.all(
F.asnumpy(ntype) == F.asnumpy(parts[name].ndata[NTYPE])
)
# Get the original edge types and original edge IDs.
orig_ids = parts[name].edata["orig_id"]
etype = F.gather_row(sim_g.edata[ETYPE], orig_ids)
parts[name].edata[ETYPE] = F.astype(
etype, RESERVED_FIELD_DTYPE[ETYPE]
)
assert np.all(
F.asnumpy(etype) == F.asnumpy(parts[name].edata[ETYPE])
)
# Calculate the global node IDs to per-node IDs mapping.
inner_ntype = F.boolean_mask(
parts[name].ndata[NTYPE], parts[name].ndata["inner_node"] == 1
)
inner_nids = F.boolean_mask(
parts[name].ndata[NID], parts[name].ndata["inner_node"] == 1
)
for ntype in g.ntypes:
inner_ntype_mask = inner_ntype == g.get_ntype_id(ntype)
typed_nids = F.boolean_mask(inner_nids, inner_ntype_mask)
# inner node IDs are in a contiguous ID range.
expected_range = np.arange(
int(F.as_scalar(typed_nids[0])),
int(F.as_scalar(typed_nids[-1])) + 1,
)
assert np.all(F.asnumpy(typed_nids) == expected_range)
# Calculate the global edge IDs to per-edge IDs mapping.
inner_etype = F.boolean_mask(
parts[name].edata[ETYPE], parts[name].edata["inner_edge"] == 1
)
inner_eids = F.boolean_mask(
parts[name].edata[EID], parts[name].edata["inner_edge"] == 1
)
for etype in g.canonical_etypes:
inner_etype_mask = inner_etype == g.get_etype_id(etype)
typed_eids = np.sort(
F.asnumpy(F.boolean_mask(inner_eids, inner_etype_mask))
)
assert np.all(
typed_eids
== np.arange(int(typed_eids[0]), int(typed_eids[-1]) + 1)
)
os.makedirs(out_path, mode=0o775, exist_ok=True)
tot_num_inner_edges = 0
out_path = os.path.abspath(out_path)
# With reshuffling, we can ensure that all nodes and edges are reshuffled
# and are in contiguous ID space.
if num_parts > 1:
node_map_val = {}
edge_map_val = {}
for ntype in g.ntypes:
ntype_id = g.get_ntype_id(ntype)
val = []
node_map_val[ntype] = []
for i in parts:
inner_node_mask = _get_inner_node_mask(parts[i], ntype_id)
val.append(
F.as_scalar(F.sum(F.astype(inner_node_mask, F.int64), 0))
)
inner_nids = F.boolean_mask(
parts[i].ndata[NID], inner_node_mask
)
node_map_val[ntype].append(
[
int(F.as_scalar(inner_nids[0])),
int(F.as_scalar(inner_nids[-1])) + 1,
]
)
val = np.cumsum(val).tolist()
assert val[-1] == g.num_nodes(ntype)
for etype in g.canonical_etypes:
etype_id = g.get_etype_id(etype)
val = []
edge_map_val[etype] = []
for i in parts:
inner_edge_mask = _get_inner_edge_mask(parts[i], etype_id)
val.append(
F.as_scalar(F.sum(F.astype(inner_edge_mask, F.int64), 0))
)
inner_eids = np.sort(
F.asnumpy(
F.boolean_mask(parts[i].edata[EID], inner_edge_mask)
)
)
edge_map_val[etype].append(
[int(inner_eids[0]), int(inner_eids[-1]) + 1]
)
val = np.cumsum(val).tolist()
assert val[-1] == g.num_edges(etype)
else:
node_map_val = {}
edge_map_val = {}
for ntype in g.ntypes:
ntype_id = g.get_ntype_id(ntype)
inner_node_mask = _get_inner_node_mask(parts[0], ntype_id)
inner_nids = F.boolean_mask(parts[0].ndata[NID], inner_node_mask)
node_map_val[ntype] = [
[
int(F.as_scalar(inner_nids[0])),
int(F.as_scalar(inner_nids[-1])) + 1,
]
]
for etype in g.canonical_etypes:
etype_id = g.get_etype_id(etype)
inner_edge_mask = _get_inner_edge_mask(parts[0], etype_id)
inner_eids = F.boolean_mask(parts[0].edata[EID], inner_edge_mask)
edge_map_val[etype] = [
[
int(F.as_scalar(inner_eids[0])),
int(F.as_scalar(inner_eids[-1])) + 1,
]
]
# Double check that the node IDs in the global ID space are sorted.
for ntype in node_map_val:
val = np.concatenate([np.array(l) for l in node_map_val[ntype]])
assert np.all(val[:-1] <= val[1:])
for etype in edge_map_val:
val = np.concatenate([np.array(l) for l in edge_map_val[etype]])
assert np.all(val[:-1] <= val[1:])
start = time.time()
ntypes = {ntype: g.get_ntype_id(ntype) for ntype in g.ntypes}
etypes = {etype: g.get_etype_id(etype) for etype in g.canonical_etypes}
part_metadata = {
"graph_name": graph_name,
"num_nodes": g.num_nodes(),
"num_edges": g.num_edges(),
"part_method": part_method,
"num_parts": num_parts,
"halo_hops": num_hops,
"node_map": node_map_val,
"edge_map": edge_map_val,
"ntypes": ntypes,
"etypes": etypes,
}
for part_id in range(num_parts):
part = parts[part_id]
# Get the node/edge features of each partition.
node_feats = {}
edge_feats = {}
if num_parts > 1:
for ntype in g.ntypes:
ntype_id = g.get_ntype_id(ntype)
# To get the edges in the input graph, we should use original node IDs.
                # Both orig_id and NID store the per-node-type IDs.
ndata_name = "orig_id"
inner_node_mask = _get_inner_node_mask(part, ntype_id)
                # These are global node IDs.
local_nodes = F.boolean_mask(
part.ndata[ndata_name], inner_node_mask
)
if len(g.ntypes) > 1:
# If the input is a heterogeneous graph.
local_nodes = F.gather_row(sim_g.ndata[NID], local_nodes)
print(
"part {} has {} nodes of type {} and {} are inside the partition".format(
part_id,
F.as_scalar(
F.sum(part.ndata[NTYPE] == ntype_id, 0)
),
ntype,
len(local_nodes),
)
)
else:
print(
"part {} has {} nodes and {} are inside the partition".format(
part_id, part.num_nodes(), len(local_nodes)
)
)
for name in g.nodes[ntype].data:
if name in [NID, "inner_node"]:
continue
node_feats[ntype + "/" + name] = F.gather_row(
g.nodes[ntype].data[name], local_nodes
)
for etype in g.canonical_etypes:
etype_id = g.get_etype_id(etype)
edata_name = "orig_id"
inner_edge_mask = _get_inner_edge_mask(part, etype_id)
                # These are global edge IDs.
local_edges = F.boolean_mask(
part.edata[edata_name], inner_edge_mask
)
if not g.is_homogeneous:
local_edges = F.gather_row(sim_g.edata[EID], local_edges)
print(
"part {} has {} edges of type {} and {} are inside the partition".format(
part_id,
F.as_scalar(
F.sum(part.edata[ETYPE] == etype_id, 0)
),
etype,
len(local_edges),
)
)
else:
print(
"part {} has {} edges and {} are inside the partition".format(
part_id, part.num_edges(), len(local_edges)
)
)
tot_num_inner_edges += len(local_edges)
for name in g.edges[etype].data:
if name in [EID, "inner_edge"]:
continue
edge_feats[
_etype_tuple_to_str(etype) + "/" + name
] = F.gather_row(g.edges[etype].data[name], local_edges)
else:
for ntype in g.ntypes:
if len(g.ntypes) > 1:
ndata_name = "orig_id"
ntype_id = g.get_ntype_id(ntype)
inner_node_mask = _get_inner_node_mask(part, ntype_id)
                    # These are global node IDs.
local_nodes = F.boolean_mask(
part.ndata[ndata_name], inner_node_mask
)
local_nodes = F.gather_row(sim_g.ndata[NID], local_nodes)
else:
local_nodes = sim_g.ndata[NID]
for name in g.nodes[ntype].data:
if name in [NID, "inner_node"]:
continue
node_feats[ntype + "/" + name] = F.gather_row(
g.nodes[ntype].data[name], local_nodes
)
for etype in g.canonical_etypes:
if not g.is_homogeneous:
edata_name = "orig_id"
etype_id = g.get_etype_id(etype)
inner_edge_mask = _get_inner_edge_mask(part, etype_id)
                    # These are global edge IDs.
local_edges = F.boolean_mask(
part.edata[edata_name], inner_edge_mask
)
local_edges = F.gather_row(sim_g.edata[EID], local_edges)
else:
local_edges = sim_g.edata[EID]
for name in g.edges[etype].data:
if name in [EID, "inner_edge"]:
continue
edge_feats[
_etype_tuple_to_str(etype) + "/" + name
] = F.gather_row(g.edges[etype].data[name], local_edges)
# delete `orig_id` from ndata/edata
del part.ndata["orig_id"]
del part.edata["orig_id"]
part_dir = os.path.join(out_path, "part" + str(part_id))
node_feat_file = os.path.join(part_dir, "node_feat.dgl")
edge_feat_file = os.path.join(part_dir, "edge_feat.dgl")
part_graph_file = os.path.join(part_dir, "graph.dgl")
part_metadata["part-{}".format(part_id)] = {
"node_feats": os.path.relpath(node_feat_file, out_path),
"edge_feats": os.path.relpath(edge_feat_file, out_path),
"part_graph": os.path.relpath(part_graph_file, out_path),
}
os.makedirs(part_dir, mode=0o775, exist_ok=True)
save_tensors(node_feat_file, node_feats)
save_tensors(edge_feat_file, edge_feats)
sort_etypes = len(g.etypes) > 1
_save_graphs(
part_graph_file,
[part],
formats=graph_formats,
sort_etypes=sort_etypes,
)
print(
"Save partitions: {:.3f} seconds, peak memory: {:.3f} GB".format(
time.time() - start, get_peak_mem()
)
)
_dump_part_config(f"{out_path}/{graph_name}.json", part_metadata)
num_cuts = sim_g.num_edges() - tot_num_inner_edges
if num_parts == 1:
num_cuts = 0
print(
"There are {} edges in the graph and {} edge cuts for {} partitions.".format(
g.num_edges(), num_cuts, num_parts
)
)
if return_mapping:
return orig_nids, orig_eids
def convert_dgl_partition_to_csc_sampling_graph(part_config):
"""Convert partitions of dgl to CSCSamplingGraph of GraphBolt.
    This API converts `DGLGraph` partitions to `CSCSamplingGraph`, which is
    dedicated to sampling in `GraphBolt`. The new graphs are stored alongside the
    original graph as `csc_sampling_graph.tar`.
    In the near future, partitions are expected to be saved as
    `CSCSamplingGraph` directly, at which point this API will be deprecated.
Parameters
----------
part_config : str
The partition configuration JSON file.
"""
# As only this function requires GraphBolt for now, let's import here.
from .. import graphbolt
part_meta = _load_part_config(part_config)
num_parts = part_meta["num_parts"]
# Utility functions.
def init_type_per_edge(graph, gpb):
etype_ids = gpb.map_to_per_etype(graph.edata[EID])[0]
return etype_ids
# Iterate over partitions.
for part_id in range(num_parts):
graph, _, _, gpb, _, _, _ = load_partition(
part_config, part_id, load_feats=False
)
# Construct GraphMetadata.
_, _, ntypes, etypes = load_partition_book(part_config, part_id)
metadata = graphbolt.GraphMetadata(ntypes, etypes)
        # Obtain CSC indptr and indices.
indptr, indices, _ = graph.adj().csc()
        # Initialize type per edge.
type_per_edge = init_type_per_edge(graph, gpb)
type_per_edge = type_per_edge.to(RESERVED_FIELD_DTYPE[ETYPE])
# Sanity check.
assert len(type_per_edge) == graph.num_edges()
csc_graph = graphbolt.from_csc(
indptr, indices, None, type_per_edge, metadata=metadata
)
orig_graph_path = os.path.join(
os.path.dirname(part_config),
part_meta[f"part-{part_id}"]["part_graph"],
)
csc_graph_path = os.path.join(
os.path.dirname(orig_graph_path), "csc_sampling_graph.tar"
)
graphbolt.save_csc_sampling_graph(csc_graph, csc_graph_path)
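# Minimal usage sketch (hypothetical path): after partition_graph() has written
# "out/mygraph.json", the partitions can be converted in place with
#   convert_dgl_partition_to_csc_sampling_graph("out/mygraph.json")
# after which each part directory holds a "csc_sampling_graph.tar" next to its
# original "graph.dgl".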
# =============================================================================
# repo: alipay/alipay-sdk-python-all | license: Apache-2.0
# path: /alipay/aop/api/domain/AlipayInsAutoAutoaftermarketOutorderSyncModel.py
# =============================================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsAutoAutoaftermarketOutorderSyncModel(object):
def __init__(self):
self._action = None
self._actual_pay_amount = None
self._alipay_id = None
self._biz_time = None
self._category = None
self._out_order_no = None
self._pay_trade_no = None
self._prod_title = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def actual_pay_amount(self):
return self._actual_pay_amount
@actual_pay_amount.setter
def actual_pay_amount(self, value):
self._actual_pay_amount = value
@property
def alipay_id(self):
return self._alipay_id
@alipay_id.setter
def alipay_id(self, value):
self._alipay_id = value
@property
def biz_time(self):
return self._biz_time
@biz_time.setter
def biz_time(self, value):
self._biz_time = value
@property
def category(self):
return self._category
@category.setter
def category(self, value):
self._category = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def pay_trade_no(self):
return self._pay_trade_no
@pay_trade_no.setter
def pay_trade_no(self, value):
self._pay_trade_no = value
@property
def prod_title(self):
return self._prod_title
@prod_title.setter
def prod_title(self, value):
self._prod_title = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.actual_pay_amount:
if hasattr(self.actual_pay_amount, 'to_alipay_dict'):
params['actual_pay_amount'] = self.actual_pay_amount.to_alipay_dict()
else:
params['actual_pay_amount'] = self.actual_pay_amount
if self.alipay_id:
if hasattr(self.alipay_id, 'to_alipay_dict'):
params['alipay_id'] = self.alipay_id.to_alipay_dict()
else:
params['alipay_id'] = self.alipay_id
if self.biz_time:
if hasattr(self.biz_time, 'to_alipay_dict'):
params['biz_time'] = self.biz_time.to_alipay_dict()
else:
params['biz_time'] = self.biz_time
if self.category:
if hasattr(self.category, 'to_alipay_dict'):
params['category'] = self.category.to_alipay_dict()
else:
params['category'] = self.category
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.pay_trade_no:
if hasattr(self.pay_trade_no, 'to_alipay_dict'):
params['pay_trade_no'] = self.pay_trade_no.to_alipay_dict()
else:
params['pay_trade_no'] = self.pay_trade_no
if self.prod_title:
if hasattr(self.prod_title, 'to_alipay_dict'):
params['prod_title'] = self.prod_title.to_alipay_dict()
else:
params['prod_title'] = self.prod_title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsAutoAutoaftermarketOutorderSyncModel()
if 'action' in d:
o.action = d['action']
if 'actual_pay_amount' in d:
o.actual_pay_amount = d['actual_pay_amount']
if 'alipay_id' in d:
o.alipay_id = d['alipay_id']
if 'biz_time' in d:
o.biz_time = d['biz_time']
if 'category' in d:
o.category = d['category']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'pay_trade_no' in d:
o.pay_trade_no = d['pay_trade_no']
if 'prod_title' in d:
o.prod_title = d['prod_title']
return o
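# Round-trip usage sketch (field values hypothetical): the model serializes to a
# plain dict for embedding in a request payload and can be rebuilt from one.
#   model = AlipayInsAutoAutoaftermarketOutorderSyncModel()
#   model.out_order_no = "OUT-0001"
#   model.actual_pay_amount = "199.00"
#   payload = json.dumps(model.to_alipay_dict())
#   restored = AlipayInsAutoAutoaftermarketOutorderSyncModel.from_alipay_dict(
#       json.loads(payload))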
# =============================================================================
# repo: pyglet/pyglet | license: BSD-3-Clause, LicenseRef-scancode-free-unknown
# path: /examples/programming_guide/image_viewer.py
# =============================================================================
#!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet
window = pyglet.window.Window()
image = pyglet.resource.image('kitten.jpg')
@window.event
def on_draw():
window.clear()
image.blit(0, 0)
pyglet.app.run()
# =============================================================================
# repo: ComparativeGenomicsToolkit/cactus | license: MIT
# path: /src/cactus/refmap/cactus_refmap.py
# =============================================================================
#!/usr/bin/env python3
"""Feature Wishlist
Top priority:
Internal contig name system (see prependUniqueIDs in cactus/src/cactus/pipeline/cactus_workflow.py, line 465)
Called in line 529:
renamedInputSeqDir = fileStore.getLocalTempDir()
uniqueFas = prependUniqueIDs(sequences, renamedInputSeqDir)
uniqueFaIDs = [fileStore.writeGlobalFile(seq, cleanup=True) for seq in uniqueFas]
I'm currently calling prependUniqueIDs outside of the workflow/fileStore. I should
check to make sure that Cactus also ultimately exports the uniqueID files,
like I'm doing. Otherwise, I should delete the prepended IDs before finishing
the pipeline.
Implement options.pathOverrides
Implement "Progressive Cactus Options" (see cactus_blast ArgumentParser)
This includes s3 compatibility, I think?
logger.info timer (see line 91 of cactus_blast)
"""
from toil.common import Toil
from toil.job import Job
from toil.statsAndLogging import logger
import os
from argparse import ArgumentParser
import collections as col
import xml.etree.ElementTree as ET
from cactus.refmap import paf_to_lastz
from cactus.refmap import fasta_preprocessing
from cactus.refmap import apply_dipcall_bed_filter
from cactus.shared.common import setupBinaries, importSingularityImage
from cactus.shared.common import makeURL
from cactus.shared.common import cactus_call
from cactus.shared.configWrapper import ConfigWrapper
from cactus.shared.common import cactusRootPath
from cactus.progressive.progressive_decomposition import compute_outgroups, parse_seqfile, get_subtree, get_spanning_subtree, get_event_set
from cactus.preprocessor.checkUniqueHeaders import sanitize_fasta_headers
# getTempFile and catFiles are used in main() below but were missing from the
# imports; in the Cactus codebase these helpers come from sonLib (assumption).
from sonLib.bioio import getTempFile, catFiles
## utility fxns:
def variation_length(lastz_cig):
"""Determines how long the mapping is (sum of insertion/deletion/match), based on the
lastz cig.
Args:
lastz_cig (string): a lastz cigar, e.g. "M50D3I5M30"
Returns:
int: sum of I+D+M in cigar.
"""
# Parsing cigars:
# indices 0, 2, 4,... are the type of variation.
# indices 1, 3, 5,... are the length of the variation of that type.
# there should be an even number of entries in the cig list: pairs of type, value.
var_len = 0
for i in range(0, len(lastz_cig), 2):
print(i, lastz_cig[i], lastz_cig[i+1])
if lastz_cig[i] in "IDM":
var_len += int(lastz_cig[i+1])
return var_len
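# Example (illustrative): variation_length(["M", "50", "D", "3", "I", "5"])
# returns 58 (50 + 3 + 5), since each M/D/I token contributes its length.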
def unpack_promise(job, iterable, i):
"""
passed an iterable and a location i, returns ith item. Useful for accessing the
contents of a toil promise.
"""
return iterable[i]
def consolidate_mappings(job, mapping_files):
"""
Warning: discards headers of all mapping files.
Given a list of mapping files, consolidates the contents (not counting headers) into a
single file.
"""
consolidated_mappings = job.fileStore.getLocalTempFile()
with open(consolidated_mappings,"w") as outfile:
for mapping_file in mapping_files.values():
with open(job.fileStore.readGlobalFile(mapping_file)) as inf:
for line in inf:
if not line.startswith("@"):
outfile.write(line)
return job.fileStore.writeGlobalFile(consolidated_mappings)
def empty(job):
"""
An empty job, for easier toil job organization.
"""
return
## dipcall filter functions
def apply_dipcall_vcf_filter(job, infile, min_var_len=50000, min_mapq=5):
"""Filters out all mappings below min_var_len and min_mapq from a lastz file.
NOTE: Assumes all secondary mappings are already removed.
Also: lastz cigars need to have <score> field filled with mapq, not raw score.
    Args:
        infile (global file): lastz cigar file to filter.
        min_var_len (int): minimum variation length (sum of I/D/M) for a mapping to be kept.
        min_mapq (int): minimum mapping quality for a mapping to be kept.
    Returns:
        global file: the filtered lastz cigar file.
    """
with open(job.fileStore.readGlobalFile(infile)) as inf:
filtered = job.fileStore.getLocalTempFile()
with open(filtered, "w+") as outf:
for line in inf:
parsed = line.split()
if variation_length(parsed[10:]) >= min_var_len:
if int(parsed[9]) >= min_mapq:
outf.write(line)
return job.fileStore.writeGlobalFile(filtered)
def filter_out_secondaries_from_paf(job, paf):
"""
Removes all secondary mappings from paf file.
"""
primary_paf = job.fileStore.getLocalTempFile()
with open(primary_paf, "w") as outf:
with open(job.fileStore.readGlobalFile(paf)) as inf:
for line in inf:
parsed = line.split()
for i in parsed[11:]:
if i[:6] == "tp:A:P" or i[:6] == "tp:A:I":
outf.write(line)
break
return job.fileStore.writeGlobalFile(primary_paf)
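# Tag semantics relied on above (minimap2 PAF): "tp:A:P" marks a primary mapping
# and "tp:A:I" an inversion; both are kept, while secondary ("tp:A:S") lines are
# dropped.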
## mapping fxns:
def run_cactus_reference_align(job, assembly_files, reference, debug_export=False, dipcall_bed_filter=False, dipcall_vcf_filter=False):
"""
Preprocesses assemblies, then runs mappings.
"""
sanitize_job = job.addChildJobFn(sanitize_fasta_headers, assembly_files)
mappings = sanitize_job.addFollowOnJobFn(map_all_to_ref, sanitize_job.rv(), reference, debug_export, dipcall_bed_filter, dipcall_vcf_filter).rv()
return mappings
def map_all_to_ref(job, assembly_files, reference, debug_export=False, dipcall_bed_filter=False, dipcall_vcf_filter=False):
"""The meat of cactus-reference-align. Performs all mappings; applies dipcall
filter_bed_filter if necessary, then converts to lastz cigars.
Args:
assembly_files (orderedDict): key: asm_name; value: asm_file
reference (string): asm_name of the reference.
debug_export (bool): Export some intermediate files of the workflow, for debugging. Defaults to False.
dipcall_bed_filter (bool): Apply the dipcall bed filter to the mappings. This will:
Guarantee that there will be no overlapping mappings.
* include mappings >=min_var_len=50kb in size.
* BUT: will exclude regions of these mappings which overlap any other mappings >=min_size_mapping=10kb in size. (This includes other >=50kb mappings).
* all mappings considered for inclusion or overlap must have >= 5 mapQ.
Defaults to False.
dipcall_vcf_filter (bool): Applies the preliminary requirements for the less-stringent vcf-filter. Ultimately, vcf-filter:
* removes all secondary mappings
* Filters out all mappings below min_var_len=50k and min_mapq=5 from a lastz file
Defaults to False.
"""
lead_job = job.addChildJobFn(empty)
# map all assemblies to the reference. Don't map reference to reference, though.
ref_mappings = dict()
secondary_mappings = dict()
primary_mappings = dict()
for assembly, assembly_file in assembly_files.items():
if assembly != reference:
            # map a to b
print("about to run map a to b. a:", assembly, job.fileStore.readGlobalFile(assembly_file), "b (ref):", reference, job.fileStore.readGlobalFile(assembly_files[reference]))
map_job = lead_job.addChildJobFn(map_a_to_b, assembly_file, assembly_files[reference], (dipcall_bed_filter or dipcall_vcf_filter))
ref_mappings[assembly] = map_job.rv()
if dipcall_bed_filter:
secondaries_filter_job = map_job.addFollowOnJobFn(filter_out_secondaries_from_paf, ref_mappings[assembly])
primary_paf = secondaries_filter_job.rv()
dipcall_bed_filter_job = secondaries_filter_job.addFollowOnJobFn(apply_dipcall_bed_filter.apply_dipcall_bed_filter, primary_paf)
bed_filtered_primary_mappings = dipcall_bed_filter_job.rv()
paf_mappings = bed_filtered_primary_mappings
else:
# convert mapping to lastz (and filter into primary and secondary mappings)
paf_mappings = ref_mappings[assembly]
# extract the primary and secondary mappings.
primary_mappings[assembly] = paf_mappings
secondary_mappings[assembly] = None
# consolidate the primary mappings into a single file; same for secondary mappings.
all_primary = lead_job.addFollowOnJobFn(consolidate_mappings, primary_mappings).rv()
return all_primary
def map_a_to_b(job, a, b, dipcall_filter):
"""Maps fasta a to fasta b.
Args:
a (global file): fasta file a. In map_all_to_ref, a is an assembly fasta.
b (global file): fasta file b. In map_all_to_ref, b is the reference.
Returns:
        global file: the PAF file mapping a onto b.
"""
print("in map a to b. a:", a, "b:", b)
# map_to_ref_paf = job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile())
tmp = job.fileStore.getLocalTempFile()
map_to_ref_paf = job.fileStore.writeGlobalFile(tmp)
if dipcall_filter:
# note: in dipcall, they include argument "--paf-no-hit".
# I don't see why they would include these "mappings", only to be filtered out
# later. I have not included the argument.
cactus_call(parameters=["minimap2", "-c", "-xasm5", "--cs", "-r2k", "-o", job.fileStore.readGlobalFile(map_to_ref_paf),
job.fileStore.readGlobalFile(b), job.fileStore.readGlobalFile(a)])
else:
cactus_call(parameters=["minimap2", "-cx", "asm5", "-o", job.fileStore.readGlobalFile(map_to_ref_paf),
job.fileStore.readGlobalFile(b), job.fileStore.readGlobalFile(a)])
return map_to_ref_paf
## main fxn and interface:
def get_options():
parser = ArgumentParser()
Job.Runner.addToilOptions(parser)
# addCactusWorkflowOptions(parser)
# ### For quick debugging of apply_dipcall_bed_filter:
# parser.add_argument('paf', type=str,
# help='For quick debugging of apply_dipcall_bed_filter.')
# options for basic input/output
parser.add_argument('seqFile', type=str,
help='A file containing all the information specified by cactus in construction. This aligner ignores the newick tree.')
parser.add_argument('reference', type=str,
help='Specifies which asm in seqFile should be treated as the reference.')
parser.add_argument("outputFile", type=str, help = "Output pairwise alignment file")
parser.add_argument("--pathOverrides", nargs="*", help="paths (multiple allowd) to override from seqFile")
parser.add_argument("--pathOverrideNames", nargs="*", help="names (must be same number as --paths) of path overrides")
# dipcall-like filters
parser.add_argument('--dipcall_bed_filter', action='store_true',
help="Applies filters & minimap2 arguments used to make the bedfile in dipcall. Only affects the primary mappings file. Secondary mappings aren't used in dipcall.")
parser.add_argument('--dipcall_vcf_filter', action='store_true',
help="Applies filters & minimap2 arguments used to make the vcf in dipcall. Only affects the primary mappings file. Secondary mappings aren't used in dipcall.")
# Progressive Cactus Options:
parser.add_argument("--configFile", dest="configFile",
help="Specify cactus configuration file",
default=os.path.join(cactusRootPath(), "cactus_progressive_config.xml"))
parser.add_argument("--latest", dest="latest", action="store_true",
help="Use the latest version of the docker container "
"rather than pulling one matching this version of cactus")
parser.add_argument("--binariesMode", choices=["docker", "local", "singularity"],
help="The way to run the Cactus binaries", default=None)
parser.add_argument("--containerImage", dest="containerImage", default=None,
help="Use the the specified pre-built containter image "
"rather than pulling one from quay.io")
## options for importing assemblies:
# following arguments are only useful under --non_blast_output
# parser.add_argument('--non_blast_output', action='store_true',
# help="Instead of using cactus-blast-style prepended ids, use an alternative import method that only alters contig ids if absolutely necessary.")
# parser.add_argument('--all_unique_ids', action='store_true',
# help="Only take effect when called with --non_blast_output. Prevents the program from touching the assembly files; the user promises that they don't contain any duplicate contig ids. In reality, there should never be contig renamings if there are no duplicate fasta ids.")
# parser.add_argument('--overwrite_assemblies', action='store_true',
# help="When cleaning the assembly files to make sure there are no duplicate contig ids, overwrite the assembly files. Copy them to a neigboring folder with the affix '_edited_for_duplicate_contig_ids' instead.")
# # Useful in normal asms import
# parser.add_argument('--assembly_save_dir', type=str, default='./unique_id_assemblies/',
# help='While deduplicating contig ids in the input fastas, save the assemblies in this directory. Ignored when used in conjunction with --overwrite_assemblies.')
# for debugging:
parser.add_argument('--debug_export', action='store_true',
help='Export several other files for debugging inspection.')
parser.add_argument('--debug_export_dir', type=str, default='./debug_export_dir/',
help='Location of the exported debug files.')
options = parser.parse_args()
return options
def main():
options = get_options()
with Toil(options) as toil:
setupBinaries(options)
importSingularityImage(options)
# load up the seqfile and figure out the outgroups and schedule
config_node = ET.parse(options.configFile).getroot()
config_wrapper = ConfigWrapper(config_node)
config_wrapper.substituteAllPredefinedConstantsWithLiterals()
mc_tree, input_seq_map, og_candidates = parse_seqfile(options.seqFile, config_wrapper)
og_map = compute_outgroups(mc_tree, config_wrapper, set(og_candidates))
event_set = get_event_set(mc_tree, config_wrapper, og_map, mc_tree.getRootName())
# apply path overrides. this was necessary for wdl which doesn't take kindly to
# text files of local paths (ie seqfile). one way to fix would be to add support
# for s3 paths and force wdl to use it. a better way would be a more fundamental
# interface shift away from files of paths throughout all of cactus
if options.pathOverrides:
for name, override in zip(options.pathOverrideNames, options.pathOverrides):
input_seq_map[name] = override
# check --reference input
if options.reference:
leaves = [mc_tree.getName(leaf) for leaf in mc_tree.getLeaves()]
if options.reference not in leaves:
raise RuntimeError("Genome specified with --reference, {}, not found in tree leaves".format(options.reference))
#import the sequences
input_seq_id_map = {}
for (genome, seq) in input_seq_map.items():
if genome in event_set:
if os.path.isdir(seq):
tmpSeq = getTempFile()
catFiles([os.path.join(seq, subSeq) for subSeq in os.listdir(seq)], tmpSeq)
seq = tmpSeq
seq = makeURL(seq)
logger.info("Importing {}".format(seq))
input_seq_id_map[genome] = toil.importFile(seq)
## Perform alignments:
if not toil.options.restart:
alignments = toil.start(Job.wrapJobFn(run_cactus_reference_align, input_seq_id_map, options.reference, options.debug_export, options.dipcall_bed_filter, options.dipcall_vcf_filter))
else:
alignments = toil.restart()
## Save alignments:
if options.dipcall_vcf_filter: # this is substantially less restrictive than the dipcall_bed_filter.
dipcall_filtered = toil.start(Job.wrapJobFn(apply_dipcall_vcf_filter, alignments))
toil.exportFile(dipcall_filtered, makeURL(options.outputFile))
else:
toil.exportFile(alignments, makeURL(options.outputFile))
if __name__ == "__main__":
main()
# =============================================================================
# repo: ros-controls/ros_controllers | license: BSD-3-Clause
# path: /rqt_joint_trajectory_controller/scripts/rqt_joint_trajectory_controller
# =============================================================================
#!/usr/bin/env python3
import sys
from rqt_gui.main import Main
main = Main()
sys.exit(main.main(sys.argv, standalone='rqt_joint_trajectory_controller'))
# =============================================================================
# repo: erigones/esdc-ce | license: Apache-2.0
# path: /gui/dc/network/forms.py
# =============================================================================
from django import forms
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from django.utils.six import text_type
# noinspection PyCompatibility
import ipaddress
from api.dc.network.views import dc_network
from api.network.base.views import net_manage
from api.network.ip.views import net_ip, net_ip_list
from api.vm.utils import get_owners
from gui.forms import SerializerForm
from gui.fields import ArrayField
from gui.widgets import ArrayWidget
from vms.models import Subnet, IPAddress, Node
TEXT_INPUT_ATTRS = {'class': 'input-transparent narrow', 'required': 'required'}
SELECT_ATTRS = {'class': 'narrow input-select2'}
class DcNetworkForm(SerializerForm):
"""
Create or remove DC<->Subnet link by calling dc_network.
"""
_api_call = dc_network
name = forms.ChoiceField(label=_('Network'), required=True,
widget=forms.Select(attrs={'class': 'input-select2 narrow disable_created2'}))
def __init__(self, request, networks, *args, **kwargs):
super(DcNetworkForm, self).__init__(request, None, *args, **kwargs)
self.fields['name'].choices = networks.values_list('name', 'alias')
class AdminNetworkForm(SerializerForm):
"""
Create Subnet by calling net_manage.
"""
_api_call = net_manage
network = None
netmask = None
dc_bound = forms.BooleanField(label=_('DC-bound?'), required=False,
widget=forms.CheckboxInput(attrs={'class': 'normal-check'}))
name = forms.CharField(label=_('Name'), max_length=32, required=True,
widget=forms.TextInput(attrs={'class': 'input-transparent narrow disable_created',
'required': 'required', 'pattern': '[A-Za-z0-9._-]+'}))
alias = forms.CharField(label=_('Alias'), required=True, max_length=32,
widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS))
owner = forms.ChoiceField(label=_('Owner'), required=False,
widget=forms.Select(attrs=SELECT_ATTRS))
access = forms.TypedChoiceField(label=_('Access'), required=False, coerce=int, choices=Subnet.ACCESS,
widget=forms.Select(attrs=SELECT_ATTRS))
desc = forms.CharField(label=_('Description'), max_length=128, required=False,
widget=forms.TextInput(attrs={'class': 'input-transparent wide', 'required': ''}))
ip_network = forms.CharField(label=_('Network/Netmask'), required=True, max_length=34,
help_text=_('IPv4 network address with netmask in CIDR format.'),
widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS)) # IP address validated in serializer
gateway = forms.CharField(label=_('Gateway'), required=False, max_length=32,
help_text=_('IPv4 gateway in quad-dotted format.'),
widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS)) # IP address is validated in serializer
vlan_id = forms.IntegerField(label=_('VLAN ID'), required=True, widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS),
help_text=_('802.1Q virtual LAN ID (0 - 4096, 0 = none).'))
nic_tag = forms.ChoiceField(label=_('NIC Tag'), required=True,
help_text=_('NIC tag or device name on compute node.'),
widget=forms.Select(attrs=SELECT_ATTRS))
vxlan_id = forms.IntegerField(label=_('VXLAN ID'), required=False, widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS),
help_text=_('VXLAN ID required for overlay NIC tags (1 - 16777215).'))
# Advanced options
resolvers = ArrayField(label=_('Resolvers'), required=False,
help_text=_('Comma-separated list of IPv4 addresses that can be used as resolvers.'),
widget=ArrayWidget(attrs={'class': 'input-transparent narrow'}))
# dns_domain = forms.CharField(label=_('DNS Domain'), required=False,
# help_text=_('Existing domain name used for creating A records for virtual servers'),
# widget=forms.TextInput(attrs={'class': 'input-transparent wide', 'required': ''}))
ptr_domain = forms.CharField(label=_('PTR Domain'), required=False,
help_text=_('Existing in-addr.arpa domain used for creating PTR associations with '
'virtual servers.'),
widget=forms.TextInput(attrs={'class': 'input-transparent wide', 'required': ''}))
dhcp_passthrough = forms.BooleanField(label=_('DHCP Passthrough'), required=False,
help_text=_('When enabled, IP addresses for this network are managed by '
'an external DHCP service.'),
widget=forms.CheckboxInput(attrs={'class': 'normal-check'}))
mtu = forms.IntegerField(label=_('MTU'), required=False, widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS),
help_text=_('MTU for the network vNIC (576 - 9000)'))
def __init__(self, request, net, *args, **kwargs):
super(AdminNetworkForm, self).__init__(request, net, *args, **kwargs)
self.fields['owner'].choices = get_owners(request).values_list('username', 'username')
self.fields['nic_tag'].choices = Node.all_nictags_choices()
if not request.user.is_staff:
self.fields['dc_bound'].widget.attrs['disabled'] = 'disabled'
def clean_ip_network(self):
try:
n = '/'.join(map(str.strip, str(self.cleaned_data.get('ip_network')).split('/')))
net = ipaddress.ip_network(text_type(n))
except ValueError:
raise forms.ValidationError(_('Enter valid IPv4 network and netmask.'))
else:
self.network, self.netmask = net.with_netmask.split('/')
return text_type(net)
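    # Example (illustrative): an input of "10.0.0.0 / 24" is normalized to
    # "10.0.0.0/24", setting self.network = "10.0.0.0" and
    # self.netmask = "255.255.255.0".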
def _initial_data(self, request, obj):
return obj.web_data_admin
def _set_custom_api_errors(self, errors):
# ip_network field does not exist in NetworkSerializer
# network and netmask errors must be set to ip_network field
network_errors = errors.get('network', [])
netmask_errors = errors.get('netmask', [])
if network_errors or netmask_errors:
if network_errors == netmask_errors:
ip_network_errors = network_errors
else:
ip_network_errors = network_errors + netmask_errors
self._errors['ip_network'] = self.error_class(ip_network_errors)
        # The ip_network will probably not be in cleaned_data, but remove it here just in case
try:
del self.cleaned_data['ip_network']
except KeyError:
pass
def _final_data(self, data=None):
ret = super(AdminNetworkForm, self)._final_data(data=data)
ip_network = ret.pop('ip_network', None)
if ip_network:
ret['network'] = self.network
ret['netmask'] = self.netmask
if self.action == 'create': # Add dc parameter when doing POST (required by api.db.utils.get_virt_object)
ret['dc'] = self._request.dc.name
return ret
class NetworkIPForm(SerializerForm):
"""
Create, update or delete network IP address.
"""
_ip = None
_count = 0
_api_call = net_ip
template = 'gui/dc/network_ip_form.html'
ip = forms.GenericIPAddressField(label=_('IPv4 address'), required=True, protocol='ipv4',
widget=forms.TextInput(attrs={'class': 'input-transparent narrow disable_created',
'required': 'required', 'pattern': '[0-9.]+'}))
count = forms.IntegerField(label=_('Count'), required=False, min_value=1, max_value=254,
help_text=_('Number of IP addresses to create.'),
widget=forms.TextInput(attrs={'class': 'input-transparent narrow', 'required': ''}))
usage = forms.TypedChoiceField(label=_('Usage'), required=False, choices=IPAddress.USAGE, coerce=int,
widget=forms.Select(attrs=SELECT_ATTRS))
note = forms.CharField(label=_('Note'), max_length=128, required=False,
widget=forms.TextInput(attrs={'class': 'input-transparent wide', 'required': ''}))
def __init__(self, request, net, ip, *args, **kwargs):
self.net = net
super(NetworkIPForm, self).__init__(request, ip, *args, **kwargs)
def _initial_data(self, request, obj):
return obj.web_data
def clean(self):
data = super(NetworkIPForm, self).clean()
count = data.pop('count', 0)
ip = data.pop('ip', None)
if not ip: # Cannot continue without ip
return data
if count and count > 1:
ips = []
i = 0
for ipaddr in self.net.ip_network.hosts(): # iterator
ipaddr = text_type(ipaddr)
i4 = ipaddr.split('.')[-1]
                # Although these are valid IP addresses, it is unusual to assign .0 or .255 addresses to virtual machines
if i4 == '0' or i4 == '255':
continue
if i or ipaddr == ip:
ips.append(ipaddr)
i += 1
if i >= count:
break
if ips:
data['ips'] = ips
self._count = len(ips)
else:
self._errors['count'] = self.error_class([_('Invalid IP address range.')])
else:
self._count = 1
self._ip = ip
return data
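    # Example (illustrative): on network 10.0.0.0/24, ip="10.0.0.10" with
    # count=3 yields data['ips'] == ['10.0.0.10', '10.0.0.11', '10.0.0.12'];
    # host addresses ending in .0 or .255 would be skipped.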
def api_call_args(self, net_name):
if self._ip:
self.__class__._api_call = net_ip
return net_name, self._ip
else:
self.__class__._api_call = net_ip_list
return net_name,
def get_action_message(self):
assert self.action in self._api_method, 'Unknown action'
if self.action == 'update':
return _('IP address was successfully updated')
elif self.action == 'delete':
return _('IP address was successfully deleted')
else:
return ungettext_lazy(
'IP address was successfully created',
'%(count)d IP addresses were successfully created',
self._count
) % {'count': self._count}
class MultiNetworkIPForm(SerializerForm):
"""
Delete multiple network IP addresses at once.
"""
_api_call = net_ip_list
template = 'gui/dc/network_ips_form.html'
ips = ArrayField(required=True, widget=forms.HiddenInput())
def __init__(self, request, net, ip, *args, **kwargs):
self.net = net
super(MultiNetworkIPForm, self).__init__(request, ip, *args, **kwargs)
@staticmethod
def api_call_args(net_name):
return net_name,
def get_action_message(self):
assert self.action == 'delete', 'Unknown action'
count = len(self.cleaned_data.get('ips', ()))
return ungettext_lazy(
'IP address was successfully deleted',
'%(count)d IP addresses were successfully deleted',
count
) % {'count': count}
# =============================================================================
# repo: the-blue-alliance/the-blue-alliance | license: MIT
# path: /stubs/google/appengine/api/__init__.pyi
# =============================================================================
from google.appengine.api import full_app_id as full_app_id
def wrap_wsgi_app(app, use_legacy_context_mode: bool = ..., use_deferred: bool = ...): ...
# =============================================================================
# repo: renatahodovan/fuzzinator | license: BSD-3-Clause
# path: /fuzzinator/fuzzer/random_content.py
# =============================================================================
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import random
import string
from .fuzzer import Fuzzer
class RandomContent(Fuzzer):
"""
Example fuzzer to generate strings of random length from random ASCII
uppercase letters and decimal digits.
**Optional parameters of the fuzzer:**
- ``min_length``: minimum length of the string to generate (integer
number, 1 by default)
- ``max_length``: maximum length of the string to generate (integer
number, 1 by default)
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
# see fuzzinator.call.*
[fuzz.foo-with-random]
sut=foo
fuzzer=fuzzinator.fuzzer.RandomContent
batch=100
[fuzz.foo-with-random.fuzzer]
min_length=100
max_length=1000
"""
def __init__(self, *, min_length=1, max_length=1, **kwargs):
self.min_length = int(min_length)
self.max_length = int(max_length)
def __call__(self, *, index):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits)
for _ in range(random.randint(self.min_length, self.max_length))).encode('utf-8', errors='ignore')
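# Usage sketch outside of Fuzzinator (hypothetical values): each call returns a
# fresh random byte string.
#   fuzzer = RandomContent(min_length=4, max_length=8)
#   print(fuzzer(index=0))  # e.g. b'7GQ2XK'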
# =============================================================================
# repo: lantunes/cellpylib | license: Apache-2.0
# path: /demos/reversible_demo.py
# =============================================================================
|
import numpy as np
import cellpylib as cpl
# NKS page 437 - Rule 214R
# run the CA forward for 32 steps to get the initial condition for the next evolution
cellular_automaton = cpl.init_simple(63)
rule = cpl.ReversibleRule(cellular_automaton[0], 214)
cellular_automaton = cpl.evolve(cellular_automaton, timesteps=32, apply_rule=rule)
# use the last state of the CA as the initial, previous state for this evolution
rule = cpl.ReversibleRule(cellular_automaton[-1], 214)
cellular_automaton = np.array([cellular_automaton[-2]])
cellular_automaton = cpl.evolve(cellular_automaton, timesteps=62, apply_rule=rule)
cpl.plot(cellular_automaton)
# =============================================================================
# repo: Azure/azure-sdk-for-python | license: LicenseRef-scancode-generic-cla, MIT, LGPL-2.1-or-later
# path: /sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/aio/operations/_patch.py
# =============================================================================
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ... import models as _models
from ._operations import AuthenticationOperations as AuthenticationOperationsGenerated
from ...operations._patch import (
build_exchange_aad_access_token_for_acr_refresh_token_request,
build_exchange_acr_refresh_token_for_acr_access_token_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AuthenticationOperations(AuthenticationOperationsGenerated):
@distributed_trace_async
async def exchange_aad_access_token_for_acr_refresh_token(
self,
grant_type: Union[str, "_models.PostContentSchemaGrantType"],
service: str,
tenant: Optional[str] = None,
refresh_token: Optional[str] = None,
access_token: Optional[str] = None,
**kwargs: Any
) -> _models.AcrRefreshToken:
"""Exchange AAD tokens for an ACR refresh Token.
:param grant_type: Can take a value of access_token_refresh_token, or access_token, or
refresh_token.
:type grant_type: str or ~container_registry.models.PostContentSchemaGrantType
:param service: Indicates the name of your Azure container registry.
:type service: str
:param tenant: AAD tenant associated to the AAD credentials. Default value is None.
:type tenant: str
:param refresh_token: AAD refresh token, mandatory when grant_type is
access_token_refresh_token or refresh_token. Default value is None.
:type refresh_token: str
:param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token
or access_token. Default value is None.
:type access_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AcrRefreshToken, or the result of cls(response)
:rtype: ~container_registry.models.AcrRefreshToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop(
"content_type", _headers.pop("Content-Type", "application/x-www-form-urlencoded")
) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AcrRefreshToken]
# Construct form data
_data = {
"grant_type": grant_type,
"service": service,
"tenant": tenant,
"refresh_token": refresh_token,
"access_token": access_token,
}
request = build_exchange_aad_access_token_for_acr_refresh_token_request(
api_version=api_version,
content_type=content_type,
data=_data,
headers=_headers,
params=_params,
)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("AcrRefreshToken", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def exchange_acr_refresh_token_for_acr_access_token(
self,
service: str,
scope: str,
refresh_token: str,
grant_type: Union[str, "_models.TokenGrantType"] = "refresh_token",
**kwargs: Any
) -> _models.AcrAccessToken:
"""Exchange ACR Refresh token for an ACR Access Token.
:param service: Indicates the name of your Azure container registry.
:type service: str
:param scope: Which is expected to be a valid scope, and can be specified more than once for
multiple scope requests. You obtained this from the Www-Authenticate response header from the
challenge.
:type scope: str
:param refresh_token: Must be a valid ACR refresh token.
:type refresh_token: str
:param grant_type: Grant type is expected to be refresh_token. Default value is
"refresh_token".
:type grant_type: str or ~container_registry.models.TokenGrantType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AcrAccessToken, or the result of cls(response)
:rtype: ~container_registry.models.AcrAccessToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01")) # type: str
content_type = kwargs.pop(
"content_type", _headers.pop("Content-Type", "application/x-www-form-urlencoded")
) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AcrAccessToken]
# Construct form data
_data = {
"service": service,
"scope": scope,
"refresh_token": refresh_token,
"grant_type": grant_type,
}
request = build_exchange_acr_refresh_token_for_acr_access_token_request(
api_version=api_version,
content_type=content_type,
data=_data,
headers=_headers,
params=_params,
)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("AcrAccessToken", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
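# Usage sketch (illustrative; client construction and AAD token acquisition are
# elided): the two operations chain an AAD token into an ACR access token.
#   refresh = await auth_ops.exchange_aad_access_token_for_acr_refresh_token(
#       grant_type="access_token", service="myregistry.azurecr.io",
#       access_token=aad_token)
#   access = await auth_ops.exchange_acr_refresh_token_for_acr_access_token(
#       service="myregistry.azurecr.io", scope="repository:hello-world:pull",
#       refresh_token=refresh.refresh_token)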
__all__ = [
"AuthenticationOperations"
] # type: List[str] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.
`patch_sdk` is a last resort escape hatch that allows you to do customizations
you can't accomplish using the techniques described in
https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
# =============================================================================
# repo: NCIC-PARALLEL/GSWITCH-LIB | license: MIT
# path: /Solver/operand/operand.py (Python 2)
# =============================================================================
from sets import Set
from subprocess import Popen, PIPE, STDOUT
import subprocess
import struct
import sys
import os
from collections import defaultdict
types=[]
d=[]
def check_mode(arch):
if (arch == "SM21" or arch=="Fermi" or arch == "SM35" or arch=="Kepler"):
return 1
elif (arch == "SM52" or arch == "Mawell"):
return 2
else:
return 0
def dump(newcode, mode, arch):
##Raw binary
if mode == 1:
#print newcode
#ff = '0x%016x' % newcode
base=int(newcode, 16)
ff="tmp.bin"
fout = open(ff, 'wb')
fout.write(struct.pack('<Q', int(base)))
fout.close()
#nvdisasm -b SM35 ff
#redirect stderr to stdout
cmd = 'nvdisasm -b SM35 %s 2>&1' % ff
tmp = os.popen(cmd).read()
rmfile = 'rm %s' % ff
os.system(rmfile)
return tmp
elif mode == 2:
if arch == "Maxwell" or arch == "SM52":
f = open("test_sm52.cubin",'rb+')
f.seek(808)
base=int(newcode, 16)
f.write(struct.pack('Q', int(base)))
f.close()
cmd = 'cuobjdump --gpu-architecture sm_52 --dump-sass test_sm52.cubin 2>&1'
tmp = os.popen(cmd).read()
return tmp
else:
print "You need to provide a cubin template and position of first instruction !"
exit()
else:
print "Error dump mode !"
exit()
def change(my, origin):
if (my.op != origin.op):
return -1
elif my.modifier != origin.modifier:
return -2
elif len(my.operand) != len(origin.operand):
return -3
elif my.operandType != origin.operandType:
return -4
else:
for ii in range(len(my.operand)):
if (my.operand[ii] != origin.operand[ii]):
return ii
return -5
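# Return convention of change(), as consumed in __main__ below: a non-negative
# value i means a single-bit flip altered only the i-th operand, so that bit
# position is recorded as part of operand i's encoding; negative values (-1..-5)
# mean the flip changed the opcode, modifiers, operand count, operand types, or
# nothing at all, and the bit is ignored.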
class Inst:
def __init__(self, inst):
l=len(inst)
#self.pred=
self.op = ""
self.dst=""
self.src=""
begin = 0
index = 0
self.probe= 0
#check predicate, such as @
if inst[0] == '{':
inst.pop(0)
if (inst[0].find("@") != -1):
self.pred=inst[0]
inst.pop(0)
#opcode
#check opcode
if inst[index][len(inst[index]) - 1] == ";" :
str=inst[index][0:len(inst[index]) -1 ]
else:
str = inst[index]
op = str.split(".");
self.op = op[0]
self.modifier=Set([]);
op.pop(0)
if (len(op) >= 1): # has modifiers
#flag has or not
self.modifier=Set(op);
inst.pop(0)
#R0, [R2], R0;
str = ' '.join(inst)
#str.replace(";", ",")
operands = str.split()
#print str
operandType=''
self.operand=[]
for operand in operands:
#check operand type: const? imm? Predicate ? register
#print operand
#(ret, value)=self.check(operand)
ret=self.check(operand)
operandType = operandType + ret
self.operandType = operandType
try:
idx = d.index(operandType)
except ValueError:
if operandType.find("X") == -1:
d.append(operandType)
self.probe = 1
def printInst(self):
print self.op, self.modifier, self.dst
def check(self, input):
operand = input[0:len(input)-1]
#print operand
key = operand[0]
idx = 0
while operand[idx] == '-' or operand[idx] == '|':
#idx = idx + 1
#key = operand[idx]
operand=operand[1:]
key=operand[0]
if key == 'R':
value=operand[1:]
try:
if float(value).is_integer():
self.operand.append(value)
return ('R')
except ValueError:
return 'X'
elif key == 'P':
value=operand[1:]
try:
if float(value).is_integer():
#self. = value
self.operand.append(value)
return 'P'
except ValueError:
return 'X'
#c[0x0][0x0]
elif key == 'c':
value=operand[1:]
begin=operand.find('x')
end=operand.find("]")
self.operand.append(operand[begin+1:end])
begin=operand.find('x',end)
end=operand.find("]", begin)
self.operand.append(operand[begin+1:end])
return 'C'
#integer
else:
try:
if float(operand).is_integer():
self.operand.append(float(operand))
return 'I'
except ValueError:
#hex immediate
if len(operand) >=2 and operand[0:2] == "0x":
self.operand.append(operand[1:])
return "I"
else:
return 'X'
return 'X'
if __name__ == "__main__":
count = 0;
#with open("uuu.sass") as f:
print "......................................................................."
print "......R:Register, I:Immediate, M:Memory, P:Predicate, C:constant......."
print "......Instruction's operands are combinations of R, I, M, P, C........."
print "......................................................................."
print " argv[1]: disasssembly file;"
print " argv[2]: arch: SM21|SM35|Maxwell|Kepler|SM52 "
with open(sys.argv[1]) as f:
for line in f:
count += 1
list=line.split()
#list.pop(0)
enc = list[len(list)-2]
base=int(enc, 16)
list.pop(len(list)-1)
list.pop(len(list)-1)
list.pop(len(list)-1)
origin=Inst(list)
if origin.probe == 1 and len(origin.operand) > 0:
pp = [[] for i in range(len(origin.operand)) ]
for i in range(0, 64):
#compute opcode, shift 64-4 bits
mask = 2**i
newcode = base ^ mask
fname = hex(newcode)
## mode 1: nvdisasm, raw binary, mode 2: cuobjdump, cubin ##
mode = check_mode(sys.argv[2])
ff = '0x%016x' % newcode
tmp = dump(ff, mode, sys.argv[2])
if tmp and tmp.find("?") == -1 and tmp.find("error") == -1:
instline=tmp.split("\n")
if (mode == 1):
inst = instline[1].split();
else:
inst = instline[5].split();
inst.pop(0)
inst.pop(len(inst) -1)
inst.pop(len(inst) -1)
inst.pop(len(inst) -1)
#ATOM.E.ADD.F32.FTZ.RN R16, [R2], R0; /* 0x68380000001c0842 */
my=Inst(inst)
ith=change(my, origin)
if ith >= 0 :
pp[ith].append(i)
print "..........................................................."
print "(Line", count, "of", sys.argv[1],") operand combination type:", origin.operandType
for k in range(len(pp)):
if k >= len(origin.operandType)-1 :
operandtype = origin.operandType[len(origin.operandType)-1]
else:
operandtype = origin.operandType[k]
print k, "operand is", operandtype
print "Encoding is:", pp[k]
print ""
"""
for dd in d:
if dd.find("X") == -1 :
print dd
"""
# =============================================================================
# repo: alltheplaces/alltheplaces | license: CC0-1.0, MIT
# path: /locations/spiders/aldi_nord_be.py
# =============================================================================
from locations.storefinders.uberall import UberallSpider
class AldiNordBESpider(UberallSpider):
name = "aldi_nord_be"
item_attributes = {"brand": "ALDI", "brand_wikidata": "Q41171373"}
key = "ALDINORDBE_4QRaIWlJgn529tNr9oXuh0fFhxYo9V"
def parse_item(self, item, feature, **kwargs):
item["ref"] = str(feature["id"])
slug = "/".join([item["city"], item["street_address"], item["ref"]]).lower().replace(" ", "-")
item["website"] = "https://www.aldi.be/nl/informatie/supermarkten.html/l/" + slug
yield item
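# Example (illustrative): city "Antwerpen", street_address "Frankrijklei 1" and
# ref "42" produce the slug "antwerpen/frankrijklei-1/42", i.e. the website
# https://www.aldi.be/nl/informatie/supermarkten.html/l/antwerpen/frankrijklei-1/42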
# =============================================================================
# repo: ray-project/ray | license: MIT, BSD-3-Clause, Apache-2.0
# path: /python/ray/data/tests/test_pipeline_nohang.py
# =============================================================================
|
import pytest
import ray
from ray.data.tests.util import column_udf, extract_values
from ray.tests.conftest import * # noqa
NUM_REPEATS = 10
NUM_TASKS = 10
# This test can be flaky if there is resource deadlock between the pipeline
# stages. Run it a lot to ensure no regressions.
def test_basic_actors(shutdown_only):
ray.init(num_cpus=2)
for _ in range(NUM_REPEATS):
ds = ray.data.range(NUM_TASKS)
ds = ds.window(blocks_per_window=1)
assert sorted(
extract_values(
"id",
ds.map(
column_udf("id", lambda x: x + 1),
compute=ray.data.ActorPoolStrategy(),
).take(),
)
) == list(range(1, NUM_TASKS + 1))
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
# =============================================================================
# repo: apache/airflow | license: Apache-2.0, BSD-3-Clause, MIT
# path: /tests/utils/test_cli_util.py
# =============================================================================
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ast
import json
import os
import sys
from argparse import Namespace
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from unittest import mock
import pytest
import airflow
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.models.log import Log
from airflow.utils import cli, cli_action_loggers, timezone
from airflow.utils.cli import _search_for_dag_file
repo_root = Path(airflow.__file__).parent.parent
class TestCliUtil:
def test_metrics_build(self):
func_name = "test"
exec_date = timezone.utcnow()
namespace = Namespace(dag_id="foo", task_id="bar", subcommand="test", execution_date=exec_date)
metrics = cli._build_metrics(func_name, namespace)
expected = {
"user": os.environ.get("USER"),
"sub_command": "test",
"dag_id": "foo",
"task_id": "bar",
"execution_date": exec_date,
}
for k, v in expected.items():
assert v == metrics.get(k)
assert metrics.get("start_datetime") <= datetime.utcnow()
assert metrics.get("full_command")
def test_fail_function(self):
"""
        The wrapped function fails, and the failure needs to be propagated.
"""
with pytest.raises(NotImplementedError):
fail_func(Namespace())
def test_success_function(self):
"""
        Test a successful function with a failing callback.
        In this case, the failure should not propagate.
"""
with fail_action_logger_callback():
success_func(Namespace())
def test_process_subdir_path_with_placeholder(self):
assert os.path.join(settings.DAGS_FOLDER, "abc") == cli.process_subdir("DAGS_FOLDER/abc")
def test_get_dags(self):
dags = cli.get_dags(None, "example_subdag_operator")
assert len(dags) == 1
dags = cli.get_dags(None, "subdag", True)
assert len(dags) > 1
with pytest.raises(AirflowException):
cli.get_dags(None, "foobar", True)
@pytest.mark.parametrize(
["given_command", "expected_masked_command"],
[
(
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin --password test",
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin --password ********",
),
(
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin -p test",
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin -p ********",
),
(
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin --password=test",
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin --password=********",
),
(
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin -p=test",
"airflow users create -u test2 -l doe -f jon -e jdoe@apache.org -r admin -p=********",
),
(
"airflow connections add dsfs --conn-login asd --conn-password test --conn-type google",
"airflow connections add dsfs --conn-login asd --conn-password ******** --conn-type google",
),
(
"airflow scheduler -p",
"airflow scheduler -p",
),
(
"airflow celery flower -p 8888",
"airflow celery flower -p 8888",
),
],
)
def test_cli_create_user_supplied_password_is_masked(
self, given_command, expected_masked_command, session
):
        # A '-p' value that is not a password, as in 'airflow scheduler -p'
        # or 'airflow celery flower -p 8888', should not be masked
args = given_command.split()
expected_command = expected_masked_command.split()
exec_date = timezone.utcnow()
namespace = Namespace(dag_id="foo", task_id="bar", subcommand="test", execution_date=exec_date)
with mock.patch.object(sys, "argv", args), mock.patch(
"airflow.utils.session.create_session"
) as mock_create_session:
metrics = cli._build_metrics(args[1], namespace)
            # Make it so the default_action_log doesn't actually commit the txn, by giving it a nested txn
# instead
mock_create_session.return_value = session.begin_nested()
mock_create_session.return_value.bulk_insert_mappings = session.bulk_insert_mappings
cli_action_loggers.default_action_log(**metrics)
log = session.query(Log).order_by(Log.dttm.desc()).first()
assert metrics.get("start_datetime") <= datetime.utcnow()
command: str = json.loads(log.extra).get("full_command")
            # The command is stored as a Python-repr string (single quotes), not JSON, so parse it with ast.literal_eval
command = ast.literal_eval(command)
assert command == expected_command
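    # --- Editorial note (not part of the original test) ---
    # The parametrized cases above pin down the masking contract: values
    # supplied to --password/-p/--conn-password (as the next token or after
    # "=") are replaced with "********", while "-p" used as a daemon flag
    # ("airflow scheduler -p") or a port ("airflow celery flower -p 8888")
    # is left untouched; the decision evidently depends on the surrounding
    # subcommand, not on the flag name alone.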
def test_setup_locations_relative_pid_path(self):
relative_pid_path = "fake.pid"
pid_full_path = os.path.join(os.getcwd(), relative_pid_path)
pid, _, _, _ = cli.setup_locations(process="fake_process", pid=relative_pid_path)
assert pid == pid_full_path
def test_setup_locations_absolute_pid_path(self):
abs_pid_path = os.path.join(os.getcwd(), "fake.pid")
pid, _, _, _ = cli.setup_locations(process="fake_process", pid=abs_pid_path)
assert pid == abs_pid_path
def test_setup_locations_none_pid_path(self):
process_name = "fake_process"
default_pid_path = os.path.join(settings.AIRFLOW_HOME, f"airflow-{process_name}.pid")
pid, _, _, _ = cli.setup_locations(process=process_name)
assert pid == default_pid_path
@contextmanager
def fail_action_logger_callback():
"""Adding failing callback and revert it back when closed."""
tmp = cli_action_loggers.__pre_exec_callbacks[:]
def fail_callback(**_):
raise NotImplementedError
cli_action_loggers.register_pre_exec_callback(fail_callback)
yield
cli_action_loggers.__pre_exec_callbacks = tmp
@cli.action_cli(check_db=False)
def fail_func(_):
raise NotImplementedError
@cli.action_cli(check_db=False)
def success_func(_):
pass
def test__search_for_dags_file():
dags_folder = settings.DAGS_FOLDER
assert _search_for_dag_file("") is None
assert _search_for_dag_file(None) is None
    # if it's a file and one can be found in a subdir, return the full path
assert _search_for_dag_file("any/hi/test_dags_folder.py") == str(
Path(dags_folder) / "test_dags_folder.py"
)
    # if it's a folder, even an existing one, fall back to the dags folder (return None)
existing_folder = Path(settings.DAGS_FOLDER, "subdir1")
assert existing_folder.exists()
assert _search_for_dag_file(existing_folder.as_posix()) is None
    # when multiple matching files are found, default to the dags folder (return None)
assert _search_for_dag_file("any/hi/__init__.py") is None
|
7241436787db8909661c3c6f77c7daf9a4917001
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/website/conferences/signals.py
|
861af183dc9b2f79450ebfe7239d7e699b8de577
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 130
|
py
|
signals.py
|
# -*- coding: utf-8 -*-
import blinker
signals = blinker.Namespace()
osf4m_user_created = signals.signal('osf4m-user-created')
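# --- Hypothetical usage sketch (editorial addition, not part of the module) ---
# Assuming the blinker signal defined above, a receiver subscribes with
# connect() and is triggered by send():
#
#   def on_user_created(sender, **kwargs):
#       print('osf4m user created by', sender)
#
#   osf4m_user_created.connect(on_user_created)
#   osf4m_user_created.send('osf4m')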
|
e8f4d1ac45f700f7f9bc1f3755746cf6d2aed8d9
|
5c49bd272a10e76b051c862ddaaa0745ea6300e9
|
/fairmotion/tasks/clustering/split_bvh.py
|
e6ac89867c1567dff4470af76852ad0432fa9bbd
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/fairmotion
|
5ff27d2d9b52f5747d99dc556fb33712cb324d19
|
e400e564deac93bb74ffecd9c366d76b3406e217
|
refs/heads/main
| 2023-08-25T02:23:12.826849
| 2022-12-22T16:55:25
| 2022-12-22T16:55:25
| 210,951,863
| 551
| 103
|
NOASSERTION
| 2023-02-23T17:34:19
| 2019-09-25T22:27:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
split_bvh.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import tqdm
from fairmotion.data import bvh
from fairmotion.ops import motion as motion_ops
def split_bvh(filepath, time_window, output_folder):
motion = bvh.load(filepath)
frames_per_time_window = time_window * motion.fps
for num, i in enumerate(
range(0, motion.num_frames(), int(frames_per_time_window / 2))
):
motion_slice = motion_ops.cut(motion, i, i + frames_per_time_window)
filepath_slice = os.path.join(
output_folder,
filepath.split(".")[-2].split("/")[-1] + "_" + str(num) + ".bvh",
)
bvh.save(motion_slice, filepath_slice)
def main(args):
os.makedirs(args.output_folder, exist_ok=True)
for root, _, files in os.walk(args.folder, topdown=False):
for filename in tqdm.tqdm(files):
filepath = os.path.join(root, filename)
split_bvh(filepath, args.time_window, args.output_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Split files in a folder to overlapping n second clips"
)
parser.add_argument(
"--time-window", type=int, help="overlapping time window in seconds"
)
parser.add_argument("--folder", type=str)
parser.add_argument("--output-folder", type=str)
args = parser.parse_args()
main(args)
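# --- Worked example (editorial addition, values assumed) ---
# With a hypothetical 30 fps motion and --time-window 2:
#   frames_per_time_window = 2 * 30 = 60
#   the loop in split_bvh steps by int(60 / 2) = 30 frames, so consecutive
#   clips overlap by 50% (frames 0-60, 30-90, 60-120, ...).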
|
3466c7125a5fece1e93dfb057f25a29911ab64b9
|
2b7e3535fdf055643d07499a5eff12c6e9e29e2f
|
/sopel/tools/identifiers.py
|
017aa0fd064728cadc97244553a490516ef20788
|
[
"EFL-2.0"
] |
permissive
|
sopel-irc/sopel
|
fb9669d82df137a322f38d9a9b38911dd2fdb5c2
|
bc688b4eaee25a1be4fef66477f016bc21ea61d8
|
refs/heads/master
| 2023-09-03T20:10:40.233784
| 2023-08-16T20:55:45
| 2023-08-16T20:55:45
| 3,035,586
| 598
| 422
|
NOASSERTION
| 2023-09-04T06:59:41
| 2011-12-22T17:59:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,665
|
py
|
identifiers.py
|
"""Identifier tools to represent IRC names (nick or channel).
Nick and channel are defined by their names, which are "identifiers": their
names are used to differentiate users from each others, channels from each
others. To ensure that two channels or two users are the same, their
identifiers must be processed to be compared properly. This process depends on
which RFC and how that RFC is implemented by the server: IRC being an old
protocol, different RFCs have different versions of that process:
* :rfc:`1459#section-2.2`: ASCII characters, and ``[]\\`` are mapped to ``{}|``
* :rfc:`2812#section-2.2`: same as in the previous RFC, adding ``~`` mapped to
``^``
Then when ISUPPORT was added, the `CASEMAPPING parameter`__ was defined so the
server can say which process to apply:
* ``ascii``: only ``[A-Z]`` must be mapped to ``[a-z]`` (implemented by
:func:`ascii_lower`)
* ``rfc1459``: follows :rfc:`2812`, because of how it was implemented in most
servers (implemented by :func:`rfc1459_lower`)
* A strict version of :rfc:`1459` also exists but it is not recommended
(implemented by :func:`rfc1459_strict_lower`)
As a result, the :class:`Identifier` class requires a casemapping function,
which should be provided by the :class:`bot<sopel.bot.Sopel>`.
.. seealso::
The bot's :class:`make_identifier<sopel.bot.Sopel.make_identifier>` method
should be used to instantiate an :class:`Identifier` to honor the
``CASEMAPPING`` parameter.
.. __: https://modern.ircdocs.horse/index.html#casemapping-parameter
"""
from __future__ import annotations
import string
from typing import Callable
Casemapping = Callable[[str], str]
ASCII_TABLE = str.maketrans(string.ascii_uppercase, string.ascii_lowercase)
RFC1459_TABLE = str.maketrans(
string.ascii_uppercase + '[]\\~',
string.ascii_lowercase + '{}|^',
)
RFC1459_STRICT_TABLE = str.maketrans(
string.ascii_uppercase + '[]\\',
string.ascii_lowercase + '{}|',
)
DEFAULT_CHANTYPES = ('#', '&', '+', '!')
def ascii_lower(text: str) -> str:
"""Lower ``text`` according to the ``ascii`` value of ``CASEMAPPING``.
In that version, only ``[A-Z]`` are to be mapped to their lowercase
equivalent (``[a-z]``). Non-ASCII characters are kept unmodified.
"""
return text.translate(ASCII_TABLE)
def rfc1459_lower(text: str) -> str:
"""Lower ``text`` according to :rfc:`2812`.
Similar to :func:`rfc1459_strict_lower`, but also maps ``~`` to ``^``, as
per :rfc:`2812#section-2.2`:
Because of IRC's Scandinavian origin, the characters ``{}|^`` are
considered to be the lower case equivalents of the characters
``[]\\~``, respectively.
.. note::
This is an implementation of the `CASEMAPPING parameter`__ for the
value ``rfc1459``, which doesn't use :rfc:`1459` but its updated version
:rfc:`2812`.
.. __: https://modern.ircdocs.horse/index.html#casemapping-parameter
"""
return text.translate(RFC1459_TABLE)
def rfc1459_strict_lower(text: str) -> str:
"""Lower ``text`` according to :rfc:`1459` (strict version).
As per :rfc:`1459#section-2.2`:
Because of IRC's scandanavian origin, the characters ``{}|`` are
considered to be the lower case equivalents of the characters ``[]\\``.
"""
return text.translate(RFC1459_STRICT_TABLE)
class Identifier(str):
"""A ``str`` subclass which acts appropriately for IRC identifiers.
:param str identifier: IRC identifier
:param casemapping: a casemapping function (optional keyword argument)
:type casemapping: Callable[[:class:`str`], :class:`str`]
When used as normal ``str`` objects, case will be preserved.
However, when comparing two Identifier objects, or comparing an Identifier
object with a ``str`` object, the comparison will be case insensitive.
This case insensitivity uses the provided ``casemapping`` function,
following the rules for the `CASEMAPPING parameter`__ from ISUPPORT. By
default, it uses :func:`rfc1459_lower`, following :rfc:`2812#section-2.2`.
.. note::
To instantiate an ``Identifier`` with the appropriate ``casemapping``
function, it is best to rely on
:meth:`bot.make_identifier<sopel.irc.AbstractBot.make_identifier>`.
.. versionchanged:: 8.0
The ``casemapping`` and ``chantypes`` parameters have been added.
.. __: https://modern.ircdocs.horse/index.html#casemapping-parameter
"""
def __new__(
cls,
identifier: str,
*,
casemapping: Casemapping = rfc1459_lower,
chantypes: tuple = DEFAULT_CHANTYPES,
) -> 'Identifier':
return str.__new__(cls, identifier)
def __init__(
self,
identifier: str,
*,
casemapping: Casemapping = rfc1459_lower,
chantypes: tuple = DEFAULT_CHANTYPES,
) -> None:
super().__init__()
self.casemapping: Casemapping = casemapping
"""Casemapping function to lower the identifier."""
self.chantypes = chantypes
"""Tuple of prefixes used for channels."""
self._lowered = self.casemapping(identifier)
def lower(self) -> str:
"""Get the IRC-compliant lowercase version of this identifier.
:return: IRC-compliant lowercase version used for case-insensitive
comparisons
The behavior of this method depends on the identifier's casemapping
function, which should be selected based on the ``CASEMAPPING``
parameter from ``ISUPPORT``.
.. versionchanged:: 8.0
Now uses the :attr:`casemapping` function to lower the identifier.
"""
return self.casemapping(self)
@staticmethod
def _lower(identifier: str):
"""Convert an identifier to lowercase per :rfc:`2812`.
:param str identifier: the identifier (nickname or channel) to convert
:return: RFC 2812-compliant lowercase version of ``identifier``
:rtype: str
:meta public:
.. versionchanged:: 8.0
Previously, this would lower all non-ASCII characters. It now uses
a strict implementation of the ``CASEMAPPING`` parameter. This is
now equivalent to call :func:`rfc1459_lower`.
If the ``identifier`` is an instance of :class:`Identifier`, this
will call that identifier's :meth:`lower` method instead.
"""
if isinstance(identifier, Identifier):
return identifier.lower()
return rfc1459_lower(identifier)
@staticmethod
def _lower_swapped(identifier: str):
"""Backward-compatible version of :meth:`_lower`.
:param identifier: the identifier (nickname or channel) to convert
:return: RFC 2812-non-compliant lowercase version of ``identifier``
:rtype: str
This is what the old :meth:`_lower` function did before Sopel 7.0. It
maps ``{}``, ``[]``, ``|``, ``\\``, ``^``, and ``~`` incorrectly.
You shouldn't use this unless you need to migrate stored values from
the previous, incorrect "lowercase" representation to the correct one.
:meta public:
        .. versionadded:: 7.0
This method was added to ensure migration of improperly lowercased
data: it reverts the data back to the previous lowercase rules.
"""
# The tilde replacement isn't needed for identifiers, but is for
# channels, which may be useful at some point in the future.
# Always convert to str, to prevent using custom casemapping
low = str(identifier).lower().replace('{', '[').replace('}', ']')
low = low.replace('|', '\\').replace('^', '~')
return low
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
self.__str__()
)
def __hash__(self):
return self._lowered.__hash__()
def __lt__(self, other):
if isinstance(other, str):
other = self.casemapping(other)
return str.__lt__(self._lowered, other)
def __le__(self, other):
if isinstance(other, str):
other = self.casemapping(other)
return str.__le__(self._lowered, other)
def __gt__(self, other):
if isinstance(other, str):
other = self.casemapping(other)
return str.__gt__(self._lowered, other)
def __ge__(self, other):
if isinstance(other, str):
other = self.casemapping(other)
return str.__ge__(self._lowered, other)
def __eq__(self, other):
if isinstance(other, str):
other = self.casemapping(other)
return str.__eq__(self._lowered, other)
def __ne__(self, other):
return not (self == other)
def is_nick(self) -> bool:
"""Check if the Identifier is a nickname (i.e. not a channel)
:return: ``True`` if this :py:class:`Identifier` is a nickname;
``False`` if it appears to be a channel
::
>>> from sopel import tools
>>> ident = tools.Identifier('Sopel')
>>> ident.is_nick()
True
>>> ident = tools.Identifier('#sopel')
>>> ident.is_nick()
False
To detect channels, :attr:`chantypes` is used::
>>> from sopel import tools
>>> ident = tools.Identifier('!sopel', chantypes=('#', '&'))
>>> ident.is_nick()
True
>>> ident.chantypes = ('#', '&', '!')
>>> ident.is_nick()
False
"""
return bool(self) and not self.startswith(self.chantypes)
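# --- Hypothetical usage sketch (editorial addition) ---
# Comparisons route through the casemapping function, so identifiers that
# differ only by RFC 1459 case folding compare equal:
#
#   assert Identifier('[Foo]~') == Identifier('{foo}^')
#   assert Identifier('NICK', casemapping=ascii_lower) == 'nick'
#   assert Identifier('#sopel').is_nick() is False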
|
e4a923aae0e8847214a918f04d43f88638e8a8ad
|
c511dbbe18091d88d1142776f48f88e75de34a93
|
/seglearn/__init__.py
|
cb89d5b8e98ea99c38dc5fd6569cac48d4b52f28
|
[] |
permissive
|
dmbee/seglearn
|
8a052dc0c5de81f0528e6eef9004258218dac612
|
b93b670fd62d4ff22bd44d15023422f20789fd23
|
refs/heads/master
| 2023-08-25T07:22:38.051119
| 2022-08-27T09:00:35
| 2022-08-27T09:00:35
| 123,977,530
| 566
| 72
|
BSD-3-Clause
| 2022-06-16T18:12:40
| 2018-03-05T20:53:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
__init__.py
|
# Author: David Burns
# License: BSD
from . import transform, pipe, util, split, datasets, feature_functions
from ._version import __version__
from .base import TS_Data
from .datasets import load_watch
from .feature_functions import base_features, all_features
from .pipe import Pype
from .preprocessing import TargetRunLengthEncoder
from .split import TemporalKFold, temporal_split
from .transform import Segment, SegmentX, SegmentXY, SegmentXYForecast, PadTrunc, Interp, InterpLongToWide, \
FeatureRep, FeatureRepMix, FunctionTransformer
from .util import check_ts_data, check_ts_data_with_ts_target, ts_stats, get_ts_data_parts
__all__ = ['TS_Data', 'FeatureRep', 'FeatureRepMix', 'PadTrunc', 'Interp', 'InterpLongToWide', 'Pype', 'Segment',
'SegmentX', 'SegmentXY', 'SegmentXYForecast', 'TemporalKFold', 'temporal_split', 'check_ts_data',
'check_ts_data_with_ts_target', 'ts_stats', 'get_ts_data_parts', 'all_features',
'base_features', 'load_watch', 'TargetRunLengthEncoder', 'FunctionTransformer',
'__version__']
__author__ = 'David Burns david.mo.burns@gmail.com'
|
16249fc1de8495458faafd118300f54203b5c0e8
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Intro_Solid_Mechanics_Adeeb/6.4.3.py
|
28a0a67f6f9b504060ffb540634d48b8eeba49b8
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 444
|
py
|
6.4.3.py
|
from sympy import Eq,diff,integrate, dsolve
import sympy as sp
sp.init_printing(use_latex="mathjax")
rho, b1, x1, c, s = sp.symbols("rho b_1 x_1 C sigma")
f = sp.Function("f")
equation = f(x1).diff(x1)+rho*b1
print("equation of equilibrium =",Eq(equation,0))
#intEqu = integrate(equation, x1)
#intEqu = intEqu.subs({b1:10,rho:1})
sol = dsolve(equation.subs({b1:10,rho:1}),f(x1),ics={f(5):20})
print("solution for the stress f(x1) (N/m^2)",sol)
|
cc0e7060b90d87cc1ea1dfb6fd36d93126a66ed4
|
4e4e03b75ed4a89a5bb463d29da2a650edbf829a
|
/transforms3d/tests/test_zooms_shears.py
|
4fc1f32d9924c2344d54dbfe6fbbc8a3b1e853d2
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
matthew-brett/transforms3d
|
98bf30e4ea18dc22248c018da692f4b9d59ce413
|
76213cc35212ffe8f6a3c92cfc4a8c6bc8026d2b
|
refs/heads/main
| 2023-09-05T01:57:37.630640
| 2022-10-11T14:40:05
| 2022-10-11T14:40:05
| 313,553
| 437
| 95
|
NOASSERTION
| 2023-09-12T09:35:20
| 2009-09-21T20:03:09
|
Python
|
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
test_zooms_shears.py
|
""" Testing zooms and shears
"""
import math
import numpy as np
import transforms3d.zooms as tzs
import transforms3d.shears as tss
from transforms3d.utils import vector_norm, random_unit_vector
from numpy.testing import assert_array_equal, assert_array_almost_equal
from transforms3d.testing import assert_raises
def test_zfdir_zmat_aff():
# test zfdir to zmat and back
for i in range(10):
factor = np.random.random() * 10 - 5
direct = np.random.random(3) - 0.5
origin = np.random.random(3) - 0.5
S0 = tzs.zfdir2mat(factor, None)
f2, d2 = tzs.mat2zfdir(S0)
S1 = tzs.zfdir2mat(f2, d2)
assert_array_almost_equal(S0, S1)
direct = np.random.random(3) - 0.5
S0 = tzs.zfdir2mat(factor, direct)
f2, d2 = tzs.mat2zfdir(S0)
S1 = tzs.zfdir2mat(f2, d2)
assert_array_almost_equal(S0, S1)
# affine versions of same
S0 = tzs.zfdir2aff(factor)
f2, d2, o2 = tzs.aff2zfdir(S0)
assert_array_almost_equal(S0, tzs.zfdir2aff(f2, d2, o2))
S0 = tzs.zfdir2aff(factor, direct)
f2, d2, o2 = tzs.aff2zfdir(S0)
assert_array_almost_equal(S0, tzs.zfdir2aff(f2, d2, o2))
S0 = tzs.zfdir2aff(factor, direct, origin)
f2, d2, o2 = tzs.aff2zfdir(S0)
assert_array_almost_equal(S0, tzs.zfdir2aff(f2, d2, o2))
def test_striu():
# Shears encoded as vector from triangle above diagonal of shear mat
S = [0.1, 0.2, 0.3]
assert_array_equal(tss.striu2mat(S),
[[ 1. , 0.1, 0.2],
[ 0. , 1. , 0.3],
[ 0. , 0. , 1. ]])
assert_array_equal(tss.striu2mat([1]),
[[ 1., 1.],
[ 0., 1.]])
for n, N in ((1, 2),
(3, 3),
(6, 4),
(10, 5),
(15, 6),
(21, 7),
(78, 13)):
shears = np.arange(n)
M = tss.striu2mat(shears)
e = np.eye(N)
inds = np.triu(np.ones((N,N)), 1).astype(bool)
e[inds] = shears
assert_array_equal(M, e)
for n in (2, 4, 5, 7, 8, 9):
shears = np.zeros(n)
assert_raises(ValueError, tss.striu2mat, shears)
def ref_mat2sadn(mat):
    # Original (unstable) implementation
mat = np.asarray(mat)
    # normal: cross product of independent eigenvectors corresponding to the eigenvalue 1
l, V = np.linalg.eig(mat)
near_1, = np.nonzero(abs(np.real(l.squeeze()) - 1.0) < 1e-4)
if near_1.size < 2:
raise ValueError("no two linear independent eigenvectors found %s" % l)
V = np.real(V[:, near_1]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = np.cross(V[i0], V[i1])
l = vector_norm(n)
if l > lenorm:
lenorm = l
normal = n
normal /= lenorm
# direction and angle
direction = np.dot(mat - np.eye(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
return angle, direction, normal
def ref_aff2sadn(aff):
    # Original (unstable) implementation
aff = np.asarray(aff)
angle, direction, normal = ref_mat2sadn(aff[:3,:3])
# point: eigenvector corresponding to eigenvalue 1
l, V = np.linalg.eig(aff)
near_1, = np.nonzero(abs(np.real(l.squeeze()) - 1.0) < 1e-8)
if near_1.size == 0:
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = np.real(V[:, near_1[-1]]).squeeze()
point = point[:3] / point[3]
return angle, direction, normal, point
def test_ref_aff2sadn():
# test aff2sadn and reference function
# This reference function can be very unstable.
# Test with known random numbers to make sure we don't hit an unstable
# spot.
rng = np.random.RandomState(12)
for i in range(10):
angle = rng.random_sample() * np.pi
direct = rng.random_sample(3) - 0.5
vect = rng.random_sample(3) # random vector
normal = np.cross(direct, vect) # orthogonalize against direct
point = rng.random_sample(3) - 0.5
# Make shear affine from angle, direction, normal and point
S0 = tss.sadn2aff(angle, direct, normal, point)
# Reconstruct angle, direction, normal, point from affine
a2, d2, n2, p2 = ref_aff2sadn(S0)
# Confirm the shear affines are equivalent
S1 = tss.sadn2aff(a2, d2, n2, p2)
assert_array_almost_equal(S0, S1)
# Confirm similar to actual implementation
a, d, n, p = tss.aff2sadn(S0)
S_actual = tss.sadn2aff(a, d, n, p)
assert_array_almost_equal(S0, S_actual)
def random_normal(direct, rng):
# Make another random vector to form cross-product.
vect = random_unit_vector(rng)
# Cross-product is orthogonal to direct.
return np.cross(direct, vect)
def test_aff2sadn():
    # Test the actual implementation
rng = np.random.RandomState()
for i in range(10000):
angle = rng.uniform(-1, 1) * np.pi
direct = random_unit_vector(rng)
rnorm = random_normal(direct, rng)
point = random_unit_vector(rng)
# Make shear affine from angle, direction, normal and point
S0 = tss.sadn2aff(angle, direct, rnorm, point)
# Reconstruct angle, direction, normal, point from affine
a, d, n, p = tss.aff2sadn(S0)
S_actual = tss.sadn2aff(a, d, n, p)
assert_array_almost_equal(S0, S_actual, decimal=5)
def test_inverse_outer():
rng = np.random.RandomState()
for i in range(10000):
in_t = np.tan(rng.uniform(-1, 1) * np.pi)
direct = random_unit_vector(rng)
rnorm = random_normal(direct, rng)
M = in_t * np.outer(direct, rnorm)
t, a, b = tss.inverse_outer(M)
assert np.allclose(M, t * np.outer(a, b))
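# --- Editorial note (not part of the original tests) ---
# The striu encoding stores the strict upper triangle of an N x N shear
# matrix as a flat length-n vector, so n = N*(N-1)/2 and
#   N = (1 + sqrt(8*n + 1)) / 2
# e.g. n=6 -> N=4, matching the (6, 4) pair tested above; the lengths
# 2, 4, 5, 7, 8, 9 admit no integer N, which is why striu2mat raises
# ValueError for them.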
|
171dce8e55ae8dae8dfa4e3f2058975072c7b6cb
|
7af0ff378525ef6132f74bac0b1eb54ce4c40c08
|
/indico/core/oauth/protector.py
|
e0b6bed816fa594339090fe7ab61fb167581c822
|
[
"MIT"
] |
permissive
|
indico/indico
|
1126ee0ac3e9d36510a64989ce71be9c02680831
|
463951511d3a8409f944f98f29875c4323f3e897
|
refs/heads/master
| 2023-08-31T11:15:00.092526
| 2023-08-30T11:07:25
| 2023-08-30T11:07:25
| 2,113,067
| 1,549
| 429
|
MIT
| 2023-09-13T20:09:56
| 2011-07-27T13:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
protector.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from authlib.integrations.flask_oauth2 import ResourceProtector
from authlib.oauth2.rfc6750.validator import BearerTokenValidator
from flask import after_this_request, g, jsonify
from flask import request as flask_request
from werkzeug.exceptions import HTTPException
from indico.core.db import db
from indico.core.oauth.util import query_token
from indico.util.date_time import now_utc
class IndicoAuthlibHTTPError(HTTPException):
def __init__(self, status_code, payload, headers):
super().__init__(payload.get('error_description') or payload['error'])
resp = jsonify(payload)
resp.headers.update(headers)
resp.status_code = status_code
self.response = resp
class IndicoResourceProtector(ResourceProtector):
def raise_error_response(self, error):
payload = dict(error.get_body())
headers = error.get_headers()
raise IndicoAuthlibHTTPError(error.status_code, payload, headers)
class IndicoBearerTokenValidator(BearerTokenValidator):
def authenticate_token(self, token_string):
return query_token(token_string, allow_personal=True)
def validate_token(self, token, scopes, request):
super().validate_token(token, scopes, request)
# if we get here, the token is valid so we can mark it as used at the end of the request
if g.get('_bearer_token_usage_logged'):
return
g._bearer_token_usage_logged = True
# XXX: should we wait or do it just now? even if the request failed for some reason, the
# token could be considered used, since it was valid and most likely used by a client who
# expected to do something with it...
token_id = token.id # avoid DetachedInstanceError in the callback
token_cls = type(token)
@after_this_request
def _update_last_use(response):
with db.tmp_session() as sess:
# do not modify `token` directly, it's attached to a different session!
sess.query(token_cls).filter_by(id=token_id).update({
token_cls.last_used_dt: now_utc(),
token_cls.last_used_ip: flask_request.remote_addr,
token_cls.use_count: token_cls.use_count + 1,
})
sess.commit()
return response
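# --- Editorial note (not part of the original module) ---
# The validator above defers its "token was used" bookkeeping with Flask's
# after_this_request: the callback only runs once a response exists, and a
# temporary session (db.tmp_session) keeps the usage UPDATE isolated from
# whatever transaction the request handler itself is running.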
|
88acd39a0bacfe98ef741e1048beb56ac7f13d31
|
31e3e0ce6d8b8cd1b286971aa1ea3c56a338ca48
|
/sunpy/database/tables.py
|
67beddd54445a78db2c939f2ffa62df6cd2d1906
|
[
"BSD-2-Clause"
] |
permissive
|
sunpy/sunpy
|
d8df998cf7753834ffd7add6911c0e4f6828b5b8
|
edd3ea805f4540d41ce2932a0e865cab2d6a4cf5
|
refs/heads/main
| 2023-09-01T12:05:09.343909
| 2023-08-31T13:36:35
| 2023-08-31T13:36:35
| 2,165,383
| 792
| 683
|
BSD-2-Clause
| 2023-09-14T14:03:09
| 2011-08-06T15:34:08
|
Python
|
UTF-8
|
Python
| false
| false
| 32,359
|
py
|
tables.py
|
# Author: Simon Liedtke <liedtke.simon@googlemail.com>
#
# This module was developed with funding provided by
# the Google Summer of Code (2013).
import os
import fnmatch
from datetime import datetime
import numpy as np
from sqlalchemy import Boolean, Column, DateTime, Float, ForeignKey, Integer, String, Table
from sqlalchemy.orm import declarative_base, relationship
import astropy.table
import astropy.units as u
from astropy.time import Time
from astropy.units import equivalencies
import sunpy
import sunpy.net.vso.legacy_response
from sunpy import config
from sunpy.io import _file_tools as sunpy_filetools
from sunpy.io import _fits
from sunpy.io.header import FileHeader
from sunpy.time import parse_time
from sunpy.util.types import DatabaseEntryType
TIME_FORMAT = config.get("general", "time_format")
DEFAULT_HEADER = FileHeader([('SIMPLE', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True),
('COMMENT', ''),
('HISTORY', ''),
('KEYCOMMENTS',
{'SIMPLE': 'conforms to FITS standard',
'BITPIX': 'array data type',
'NAXIS': 'number of array dimensions'}),
('WAVEUNIT', None)])
__all__ = [
'WaveunitNotFoundError', 'WaveunitNotConvertibleError', 'JSONDump',
'FitsHeaderEntry', 'FitsKeyComment', 'Tag', 'DatabaseEntry',
'entries_from_query_result', 'entries_from_file', 'entries_from_dir',
'display_entries']
Base = declarative_base()
# required for the many-to-many relation on tags:entries
association_table = Table('association', Base.metadata,
Column('tag_name', String, ForeignKey('tags.name')),
Column('entry_id', Integer, ForeignKey('data.id'))
)
class WaveunitNotFoundError(Exception):
"""This exception is raised if a wavelength unit cannot be found in a FITS
header or in a VSO query result block.
"""
def __init__(self, obj):
self.obj = obj
def __str__(self):
return f'the wavelength unit cannot be found in {self.obj}' + \
' and default_waveunit not specified when opening the database'
class WaveunitNotConvertibleError(Exception):
"""This exception is raised if a wavelength cannot be converted to an
astropy.units.Unit instance.
"""
def __init__(self, waveunit):
self.waveunit = waveunit
def __str__(self):
return (
f'the waveunit {self.waveunit!r} cannot be converted to an '
'astropy.units.Unit instance')
class JSONDump(Base):
__tablename__ = 'jsondumps'
dump = Column(String, nullable=False, primary_key=True)
def __init__(self, dump):
self.dump = dump
def __eq__(self, other):
return self.dump == other.dump
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.dump
def __repr__(self):
return f'{self.__class__.__name__}(dump={self.dump!r})'
class FitsHeaderEntry(Base):
__tablename__ = 'fitsheaderentries'
dbentry_id = Column(Integer, ForeignKey('data.id'))
id = Column(Integer, primary_key=True)
key = Column(String, nullable=False)
value = Column(String)
def __init__(self, key, value):
self.key = key
self.value = value
def __eq__(self, other):
return (
(self.id == other.id or self.id is None or other.id is None) and
self.key == other.key and
self.value == other.value)
def __hash__(self):
return super().__hash__()
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return f'{self.__class__.__name__}(id={self.id}, key={self.key!r}, value={self.value!r})'
class FitsKeyComment(Base):
__tablename__ = 'fitskeycomments'
dbentry_id = Column(Integer, ForeignKey('data.id'))
id = Column(Integer, primary_key=True)
key = Column(String, nullable=False)
value = Column(String)
def __init__(self, key, value):
self.key = key
self.value = value
def __eq__(self, other):
return (
(self.id == other.id or self.id is None or other.id is None) and
self.key == other.key and
self.value == other.value)
def __lt__(self, other):
return (f'{self.key}, {self.value}' <
f'{other.key}, {other.value}')
def __hash__(self):
return super().__hash__()
def __ne__(self, other):
return not (self == other)
def __repr__(self):
        return f'{self.__class__.__name__}(id={self.id}, key={self.key!r}, value={self.value!r})'
class Tag(Base):
__tablename__ = 'tags'
name = Column(String, nullable=False, primary_key=True)
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return super().__hash__()
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.name
def __repr__(self):
return f'{self.__class__.__name__}(name={self.name!r})'
class DatabaseEntry(DatabaseEntryType, Base):
"""
This represents the main table of the database
and each instance represents one record that *can* be saved in the
database.
Parameters
----------
id : `int`
A unique ID number. By default it is None, but automatically set to the
maximum number plus one when this entry is added to the database.
source : str
The source is the name of an observatory or the name of a network of
observatories.
provider : str
The name of the server which provides the retrieved data.
physobs : str
A physical observable identifier used by VSO.
fileid : str
The file ID is a string defined by the data provider that should point
        to a specific data product. The association of a fileid with specific
        data may change over time, for example if the fileid always points to
        the latest calibrated data.
observation_time_start : datetime.datetime
The date and time when the observation of the data started.
observation_time_end : datetime.datetime
The date and time when the observation of the data ended.
instrument : str
The instrument which was used to observe the data.
size : float
The size of the data in kilobytes.
wavemin : float
The value of the measured wave length.
wavemax : float
        This is the same value as ``wavemin``. The value is stored twice
        because each `sunpy.net.dataretriever.client.QueryResponse`, which is
        used by the vso package, contains both of these values.
hdu_index : `int`
This value provides a list of all available HDUs and in what
files they are located.
path : str
A local file path where the according FITS file is saved.
download_time : datetime.datetime
The date and time when the files connected to a query have been
downloaded. Note: this is not the date and time when this entry has
been added to a database!
starred : bool
Entries can be starred to mark them. By default, this value is False.
fits_header_entries : list
A list of ``FitsHeaderEntry`` instances.
tags : list
A list of ``Tag`` instances. Use `sunpy.database.Database.tag` to
add a new tag or multiple tags to a specific entry.
"""
__tablename__ = 'data'
# FIXME: primary key is data provider + file ID + download_time!
id = Column(Integer, primary_key=True)
source = Column(String)
provider = Column(String)
physobs = Column(String)
fileid = Column(String)
observation_time_start = Column(DateTime)
observation_time_end = Column(DateTime)
instrument = Column(String)
size = Column(Float)
wavemin = Column(Float)
wavemax = Column(Float)
hdu_index = Column(Integer)
path = Column(String)
download_time = Column(DateTime)
starred = Column(Boolean, default=False)
fits_header_entries = relationship('FitsHeaderEntry')
fits_key_comments = relationship('FitsKeyComment')
tags = relationship('Tag', secondary=association_table, backref='data')
@classmethod
def _from_query_result_block(cls, qr_block, default_waveunit=None):
"""Make a new :class:`DatabaseEntry` instance from a VSO query result
block. The values of :attr:`wavemin` and :attr:`wavemax` are converted
to nm (nanometres).
Parameters
----------
qr_block : `sunpy.net.dataretriever.client.QueryResponse`
A query result block is usually not created directly; instead,
one gets instances of `sunpy.net.dataretriever.client.QueryResponse` by
iterating over a VSO query result.
default_waveunit : str, optional
The wavelength unit that is used if it cannot be found in the
``qr_block``.
Examples
--------
>>> from sunpy.net import vso, attrs as a
>>> from sunpy.database.tables import DatabaseEntry
>>> client = vso.VSOClient() # doctest: +SKIP
>>> qr = client.search(
... a.Time('2001/1/1', '2001/1/2'),
... a.Instrument.eit,
... response_format="legacy") # doctest: +SKIP
>>> entry = DatabaseEntry._from_query_result_block(qr.blocks[0]) # doctest: +SKIP
>>> entry.source # doctest: +SKIP
'SOHO'
>>> entry.provider # doctest: +SKIP
'SDAC'
>>> entry.physobs # doctest: +SKIP
'intensity'
>>> entry.fileid # doctest: +SKIP
'/archive/soho/private/data/processed/eit/lz/2001/01/efz20010101.000042'
>>> entry.observation_time_start, entry.observation_time_end # doctest: +SKIP
(datetime.datetime(2001, 1, 1, 0, 0, 42), datetime.datetime(2001, 1, 1, 0, 0, 54))
>>> entry.instrument # doctest: +SKIP
'EIT'
>>> entry.size # doctest: +SKIP
2059.0
>>> entry.wavemin, entry.wavemax # doctest: +SKIP
(19.5, 19.5)
"""
time_start = datetime.strptime(qr_block.time.start, '%Y%m%d%H%M%S')
if not qr_block.time.end:
qr_block.time.end = qr_block.time.start
time_end = datetime.strptime(qr_block.time.end, '%Y%m%d%H%M%S')
wave = qr_block.wave
unit = None
if wave.waveunit is None:
if default_waveunit is not None:
unit = u.Unit(default_waveunit)
else:
# some query response blocks store the unit "kev",
# but Astropy only understands "keV". See issue #766.
waveunit = wave.waveunit
if waveunit == "kev":
waveunit = "keV"
unit = u.Unit(waveunit)
if wave.wavemin is None:
wavemin = None
else:
if unit is None:
raise WaveunitNotFoundError(qr_block)
wavemin = unit.to(u.nm, float(wave.wavemin),
equivalencies.spectral())
if wave.wavemax is None:
wavemax = None
else:
if unit is None:
raise WaveunitNotFoundError(qr_block)
wavemax = unit.to(u.nm, float(wave.wavemax),
equivalencies.spectral())
source = getattr(qr_block, 'source', None)
provider = getattr(qr_block, 'provider', None)
fileid = getattr(qr_block, 'fileid', None)
instrument = getattr(qr_block, 'instrument', None)
size = getattr(qr_block, 'size', -1)
physobs = getattr(qr_block, 'physobs', None)
if physobs is not None:
physobs = str(physobs)
return cls(
source=source, provider=provider, physobs=physobs, fileid=fileid,
observation_time_start=time_start, observation_time_end=time_end,
instrument=instrument, size=size,
wavemin=wavemin, wavemax=wavemax)
@classmethod
def _from_fido_search_result_block(cls, sr_block, default_waveunit=None):
"""
Make a new :class:`DatabaseEntry` instance from a Fido search
result block.
Parameters
----------
sr_block : `sunpy.net.dataretriever.client.QueryResponse`
A query result block is usually not created directly; instead,
one gets instances of
``sunpy.net.dataretriever.client.QueryResponse`` by iterating
over each element of a Fido search result.
default_waveunit : `str`, optional
The wavelength unit that is used if it cannot be found in the
`sr_block`.
"""
# All attributes of DatabaseEntry that are not in QueryResponse
# are set as None for now.
source = sr_block.get('Source')
provider = sr_block.get('Provider')
physobs = sr_block.get('Physobs')
if physobs is not None:
physobs = str(physobs)
instrument = sr_block.get('Instrument')
time_start = sr_block.get('Start Time')
if time_start is not None:
time_start = time_start.datetime
time_end = sr_block.get('End Time')
if time_end is not None:
time_end = time_end.datetime
wavelengths = sr_block.get('Wavelength', np.nan * u.nm)
if wavelengths is None:
wavelengths = np.nan * u.nm
if isinstance(wavelengths, u.Quantity):
if wavelengths.isscalar:
wavemin = wavemax = wavelengths.to_value(u.nm, equivalencies=u.spectral())
else:
wavemin, wavemax = wavelengths.to_value(u.nm, equivalencies=u.spectral())
else:
raise TypeError("Expected Wavelength in the Fido response to be None or a Quantity")
fileid = sr_block.get('url', sr_block.get('fileid'))
size = None
return cls(
source=source, provider=provider, physobs=physobs, fileid=fileid,
observation_time_start=time_start, observation_time_end=time_end,
instrument=instrument, size=size,
wavemin=wavemin, wavemax=wavemax)
def __eq__(self, other):
if self.wavemin is None and other.wavemin is None:
wavemins_equal = True
elif not all([self.wavemin, other.wavemin]):
# This means one is None and the other isn't
wavemins_equal = False
else:
wavemins_equal = np.allclose([self.wavemin], [other.wavemin], equal_nan=True)
if self.wavemax is None and other.wavemax is None:
wavemaxs_equal = True
elif not all([self.wavemax, other.wavemax]):
# This means one is None and the other isn't
wavemaxs_equal = False
else:
wavemaxs_equal = np.allclose([self.wavemax], [other.wavemax], equal_nan=True)
return (
(self.id == other.id or self.id is None or other.id is None) and
self.source == other.source and
self.provider == other.provider and
self.physobs == other.physobs and
self.fileid == other.fileid and
self.observation_time_start == other.observation_time_start and
self.observation_time_end == other.observation_time_end and
self.instrument == other.instrument and
self.size == other.size and
wavemins_equal and
wavemaxs_equal and
self.path == other.path and
self.download_time == other.download_time and
bool(self.starred) == bool(other.starred) and
self.fits_header_entries == other.fits_header_entries and
self.tags == other.tags)
def _compare_attributes(self, other, attribute_list):
"""
Compare a given list of attributes of two :class:`DatabaseEntry`
instances and return True if all of them match.
Parameters
----------
other : :class:`DatabaseEntry` instance
attribute_list : `list`
The list of attributes that will be compared in both instances,
self and other.
"""
if len(attribute_list) == 0:
raise TypeError('At least one attribute required')
for attribute in attribute_list:
if getattr(self, attribute) != getattr(other, attribute):
return False
return True
def __hash__(self):
return super().__hash__()
def __ne__(self, other):
return not (self == other)
def __repr__(self):
attrs = [
'id', 'source', 'provider', 'physobs', 'fileid',
'observation_time_start', 'observation_time_end', 'instrument',
'size', 'wavemin', 'wavemax', 'path', 'download_time', 'starred',
'fits_header_entries', 'tags']
ret = f'{self.__class__.__name__}('
for attr in attrs:
value = getattr(self, attr, None)
if value:
ret += f'{attr}={value!r}, '
ret = ret.rstrip(', ')
ret += ')'
return ret
def entries_from_query_result(qr, default_waveunit=None):
"""
Use a query response returned from :meth:`sunpy.net.vso.VSOClient.search`
to generate instances of :class:`DatabaseEntry`. Return an iterator over
those instances.
Parameters
----------
qr : `sunpy.net.vso.VSOQueryResponseTable`
The query response from which to build the database entries.
default_waveunit : `str`, optional
The wavelength unit that is used if it cannot be found in the
``qr_block``.
Examples
--------
>>> from sunpy.net import vso, attrs as a
>>> from sunpy.database.tables import entries_from_query_result
>>> client = vso.VSOClient() # doctest: +SKIP
>>> qr = client.search(
... a.Time('2001/1/1', '2001/1/2'),
... a.Instrument.eit,
... response_format="legacy") # doctest: +SKIP
>>> entries = entries_from_query_result(qr) # doctest: +SKIP
>>> entry = next(entries) # doctest: +SKIP
>>> entry.source # doctest: +SKIP
'SOHO'
>>> entry.provider # doctest: +SKIP
'SDAC'
>>> entry.physobs # doctest: +SKIP
'intensity'
>>> entry.fileid # doctest: +SKIP
'/archive/soho/private/data/processed/eit/lz/2001/01/efz20010101.000042'
>>> entry.observation_time_start, entry.observation_time_end # doctest: +SKIP
(datetime.datetime(2001, 1, 1, 0, 0, 42), datetime.datetime(2001, 1, 1, 0, 0, 54))
>>> entry.instrument # doctest: +SKIP
'EIT'
>>> entry.size # doctest: +SKIP
2059.0
>>> entry.wavemin, entry.wavemax # doctest: +SKIP
(19.5, 19.5)
"""
for block in qr:
yield DatabaseEntry._from_query_result_block(block, default_waveunit)
def entries_from_fido_search_result(sr, default_waveunit=None):
"""
Use a `sunpy.net.dataretriever.fido_factory.UnifiedResponse`
object returned from
:meth:`sunpy.net.dataretriever.fido_factory.UnifiedDownloaderFactory.search`
to generate instances of :class:`DatabaseEntry`. Return an iterator
over those instances.
Parameters
----------
sr : `sunpy.net.dataretriever.fido_factory.UnifiedResponse`
A UnifiedResponse object that is used to store responses from the
unified downloader. This is returned by the ``search`` method of a
:class:`sunpy.net.dataretriever.fido_factory.UnifiedDownloaderFactory`
object.
default_waveunit : `str`, optional
The wavelength unit that is used if it cannot be found in the Query
Response block.
Examples
--------
>>> from sunpy.net import Fido, attrs
>>> from sunpy.database.tables import entries_from_fido_search_result
>>> sr = Fido.search(attrs.Time("2012/1/1", "2012/1/2"),
... attrs.Instrument('lyra')) # doctest: +SKIP
>>> entries = entries_from_fido_search_result(sr) # doctest: +SKIP
>>> entry = next(entries) # doctest: +SKIP
>>> entry.source # doctest: +SKIP
'PROBA2'
>>> entry.provider # doctest: +SKIP
'ESA'
>>> entry.physobs # doctest: +SKIP
'irradiance'
>>> entry.fileid # doctest: +SKIP
'http://proba2.oma.be/lyra/data/bsd/2012/01/01/lyra_20120101-000000_lev2_std.fits'
>>> entry.observation_time_start, entry.observation_time_end # doctest: +SKIP
(datetime.datetime(2012, 1, 1, 0, 0), datetime.datetime(2012, 1, 1, 23, 59, 59, 999000))
>>> entry.instrument # doctest: +SKIP
'LYRA'
"""
for entry in sr:
if isinstance(entry, sunpy.net.vso.legacy_response.QueryResponse):
# This is because Fido can search the VSO. It
# returns a VSO QueryResponse.
for block in entry:
yield DatabaseEntry._from_query_result_block(block, default_waveunit)
elif isinstance(entry, sunpy.net.jsoc.jsoc.JSOCResponse):
# Adding JSOC results to the DB not supported for now
raise ValueError("Cannot add JSOC results to database")
else:
for block in entry:
yield DatabaseEntry._from_fido_search_result_block(block, default_waveunit)
def entries_from_file(file, default_waveunit=None,
time_string_parse_format=''):
# Note: time_string_parse_format='' so that None won't be passed to Time.strptime
    # (which would raise an error).
"""Use the headers of a FITS file to generate an iterator of
:class:`sunpy.database.tables.DatabaseEntry` instances. Gathered
information will be saved in the attribute ``fits_header_entries``. If the
key INSTRUME, WAVELNTH or DATE-OBS / DATE_OBS is available, the attribute
``instrument``, ``wavemin`` and ``wavemax`` or ``observation_time_start`` is set,
respectively. If the wavelength unit can be read, the values of ``wavemin``
and ``wavemax`` are converted to nm (nanometres). The value of the ``file``
parameter is used to set the attribute ``path`` of each generated database
entry.
Parameters
----------
file : str, file object
Either a path pointing to a FITS file or a an opened file-like object.
If an opened file object, its mode must be one of the following rb,
rb+, or ab+.
default_waveunit : str, optional
The wavelength unit that is used for a header if it cannot be found.
time_string_parse_format : str, optional
Fallback timestamp format which will be passed to
`~astropy.time.Time.strptime` if `sunpy.time.parse_time` is unable to
automatically read the ``date-obs`` metadata.
Raises
------
sunpy.database.tables.WaveunitNotFoundError
If ``default_waveunit`` is not given and the wavelength unit cannot
be found in one of the FITS headers
sunpy.database.tables.WaveunitNotConvertibleError
If a wavelength unit could be found but cannot be used to create an
instance of the type ``astropy.units.Unit``. This can be the case
for example if a FITS header has the key ``WAVEUNIT`` with the value
``nonsense``.
Examples
--------
>>> from sunpy.database.tables import entries_from_file
>>> import sunpy.data.sample # doctest: +SKIP
>>> entries = list(entries_from_file(sunpy.data.sample.SWAP_LEVEL1_IMAGE)) # doctest: +SKIP
>>> len(entries) # doctest: +SKIP
1
>>> entry = entries.pop() # doctest: +SKIP
>>> entry.instrument # doctest: +SKIP
'SWAP'
>>> entry.observation_time_start, entry.observation_time_end # doctest: +SKIP
(datetime.datetime(2011, 6, 7, 6, 33, 29, 759000), None)
>>> entry.wavemin, entry.wavemax # doctest: +SKIP
(17.400000000000002, 17.400000000000002)
>>> len(entry.fits_header_entries) # doctest: +SKIP
110
"""
headers = _fits.get_header(file)
# This just checks for blank default headers
# due to compression.
for header in headers:
if header == DEFAULT_HEADER:
headers.remove(header)
if isinstance(file, str):
filename = file
else:
filename = getattr(file, 'name', None)
for header in headers:
entry = DatabaseEntry(path=filename)
for key, value in header.items():
# Yes, it is possible to have an empty key in a FITS file.
# Example: sunpy.data.sample.EIT_195_IMAGE
# Don't ask me why this could be a good idea.
if key == '':
value = str(value)
elif key == 'KEYCOMMENTS':
for k, v in value.items():
entry.fits_key_comments.append(FitsKeyComment(k, v))
continue
entry.fits_header_entries.append(FitsHeaderEntry(key, value))
waveunit = _fits.extract_waveunit(header)
entry.hdu_index = headers.index(header)
if waveunit is None:
waveunit = default_waveunit
unit = None
if waveunit is not None:
try:
unit = u.Unit(waveunit)
except ValueError:
raise WaveunitNotConvertibleError(waveunit)
for header_entry in entry.fits_header_entries:
key, value = header_entry.key, header_entry.value
if key == 'INSTRUME':
entry.instrument = value
elif key == 'WAVELNTH':
if unit is None:
raise WaveunitNotFoundError(file)
# use the value of `unit` to convert the wavelength to nm
entry.wavemin = entry.wavemax = unit.to(
u.nm, value, equivalencies.spectral())
# NOTE: the key DATE-END or DATE_END is not part of the official
# FITS standard, but many FITS files use it in their header
elif key in ('DATE-END', 'DATE_END'):
try:
dt = parse_time(value).datetime
except ValueError:
dt = Time.strptime(value, time_string_parse_format).datetime
entry.observation_time_end = dt
elif key in ('DATE-OBS', 'DATE_OBS'):
try:
dt = parse_time(value).datetime
except ValueError:
dt = Time.strptime(value, time_string_parse_format).datetime
entry.observation_time_start = dt
yield entry
def entries_from_dir(fitsdir, recursive=False, pattern='*',
default_waveunit=None, time_string_parse_format=None):
"""Search the given directory for FITS files and use the corresponding FITS
headers to generate instances of :class:`DatabaseEntry`. FITS files are
detected by reading the content of each file, the ``pattern`` argument may be
used to avoid reading entire directories if one knows that all FITS files
have the same filename extension.
Parameters
----------
fitsdir : str
The directory where to look for FITS files.
recursive : bool, optional
If True, the given directory will be searched recursively. Otherwise,
only the given directory and no subdirectories are searched. The
default is `False`, i.e. the given directory is not searched
recursively.
pattern : str, optional
The pattern can be used to filter the list of filenames before the
files are attempted to be read. The default is to collect all files.
This value is passed to the function :func:`fnmatch.filter`, see its
documentation for more information on the supported syntax.
default_waveunit : str, optional
The wavelength unit that is used for a header if it cannot be found.
time_string_parse_format : str, optional
Fallback timestamp format which will be passed to
`~astropy.time.Time.strptime` if `sunpy.time.parse_time` is unable to
automatically read the ``date-obs`` metadata.
Returns
-------
generator of (DatabaseEntry, str) pairs
A generator where each item is a tuple consisting of a
:class:`DatabaseEntry` instance and the absolute path to the filename
which was used to make the database entry.
Examples
--------
>>> import os
>>> from sunpy.data.test import rootdir as fitsdir
>>> from sunpy.database.tables import entries_from_dir
>>> eitdir = os.path.join(fitsdir, 'EIT')
>>> entries = list(entries_from_dir(eitdir, default_waveunit='angstrom'))
>>> len(entries)
2
"""
for dirpath, dirnames, filenames in os.walk(fitsdir):
filename_paths = (os.path.join(dirpath, name) for name in sorted(filenames))
for path in fnmatch.filter(filename_paths, pattern):
try:
filetype = sunpy_filetools._detect_filetype(path)
except (
sunpy_filetools.UnrecognizedFileTypeError,
sunpy_filetools.InvalidJPEG2000FileExtension):
continue
if filetype == 'fits':
for entry in entries_from_file(
path, default_waveunit,
time_string_parse_format=time_string_parse_format
):
yield entry, path
if not recursive:
break
def _create_display_table(database_entries, columns=None, sort=False):
"""Generate a table to display the database entries.
Parameters
----------
database_entries : list
        The :class:`DatabaseEntry` instances that will become the rows in the resulting table.
columns : list
The column name strings that will be displayed in the resulting table. Possible
values for the strings are all attributes of :class:`DatabaseEntry`.
sort : bool (optional)
If True, sorts the entries before displaying them.
Returns
-------
    astropy.table.Table
An astropy table that can be printed on the console or written to a
file.
"""
if columns is None:
columns = ['id', 'observation_time_start', 'observation_time_end',
'instrument', 'source', 'provider', 'physobs', 'wavemin',
'wavemax', 'path', 'fileid', 'tags', 'starred',
'download_time', 'size']
data = []
for entry in database_entries:
row = []
for col in columns:
if col == 'starred':
row.append('Yes' if entry.starred else 'No')
elif col == 'tags':
row.append(', '.join(map(str, entry.tags)) or 'N/A')
elif col == 'hdu_index':
row.append(entry.hdu_index)
# do not display microseconds in datetime columns
elif col in (
'observation_time_start',
'observation_time_end',
'download_time'):
time = getattr(entry, col, None)
if time is None:
formatted_time = 'N/A'
else:
formatted_time = time.strftime(TIME_FORMAT)
row.append(formatted_time)
else:
row.append(str(getattr(entry, col) or 'N/A'))
if not row:
raise TypeError('at least one column must be given')
data.append(row)
if not data:
raise TypeError('given iterable is empty')
if sort:
data.sort()
return astropy.table.Table(rows=data, names=columns)
def display_entries(database_entries, columns=None, sort=False):
"""
Print a table to display the database entries.
Parameters
----------
database_entries : iterable of `DatabaseEntry` instances
The database entries will be the rows in the resulting table.
columns : iterable of `str`
The columns that will be displayed in the resulting table. Possible
values for the strings are all attributes of :class:`DatabaseEntry`.
sort : bool, optional
If True, sorts the entries before displaying them.
"""
return _create_display_table(database_entries, columns, sort).__str__()
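# --- Editorial example (values assumed, not part of the original module) ---
# The nanometre conversions used throughout this module are plain astropy
# unit conversions with spectral equivalencies, e.g.:
#
#   import astropy.units as u
#   u.Unit('angstrom').to(u.nm, 171, u.spectral())  # -> 17.1
#   u.Unit('keV').to(u.nm, 1, u.spectral())         # -> ~1.2398 (E = hc/lambda)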
|
4c53152250c68b3bcffdfc695cbec9b26fd75741
|
0e94b30ee428326fb65a15f6cdbd2826bea282f0
|
/Chapter11/microservices/users_backend/UsersBackend/tests/conftest.py
|
1138212e930ed6f858cb839ae0d383fca801cc36
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-On-Docker-for-Microservices-with-Python
|
a9874ef3e70ad65b8550401e08499129beb87c8d
|
2ed9899e9df566c0a087682b3a85798b1c0380e9
|
refs/heads/master
| 2023-05-11T16:01:07.509199
| 2023-01-30T08:25:38
| 2023-01-30T08:25:38
| 185,544,286
| 200
| 249
|
MIT
| 2023-05-01T20:52:30
| 2019-05-08T06:26:17
|
Python
|
UTF-8
|
Python
| false
| false
| 244
|
py
|
conftest.py
|
import pytest
from users_backend.app import create_app
@pytest.fixture
def app():
application = create_app(script=True)
application.app_context().push()
# Initialise the DB
application.db.create_all()
return application
|
860e921ff847c0b217803ea3cce0fb48ec4d99a9
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/innerClassesNotSuggestedForKeywordPatterns.py
|
5d6487a350cbb0756fb55dcfe526ead0e331265f
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
innerClassesNotSuggestedForKeywordPatterns.py
|
class C:
class InnerClass:
pass
match C():
case C(Inner<caret>):
pass
|
36a8ab92641c133ed96c81165440ed1623c2355d
|
58919431a7fb4da999b8584d5fa7ace5232b3e8a
|
/heudiconv/heuristics/studyforrest_phase2.py
|
c9151d9a1a13c810d38f115dca20efc76c9dbd03
|
[
"Apache-2.0"
] |
permissive
|
nipy/heudiconv
|
1eb767cbf2bcf00b6d08d78584a9ee6b037cab2e
|
bf9b75b34ea002f73cc6cf54189e4de5efcb2a91
|
refs/heads/master
| 2023-08-08T01:00:14.261232
| 2023-07-25T19:55:56
| 2023-07-25T19:55:56
| 42,650,211
| 207
| 141
|
NOASSERTION
| 2023-09-11T13:52:41
| 2015-09-17T10:34:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,031
|
py
|
studyforrest_phase2.py
|
from __future__ import annotations
from typing import Optional
from heudiconv.utils import SeqInfo
scaninfo_suffix = ".json"
def create_key(
template: Optional[str],
outtype: tuple[str, ...] = ("nii.gz",),
annotation_classes: None = None,
) -> tuple[str, tuple[str, ...], None]:
if template is None or not template:
raise ValueError("Template must be a valid format string")
return (template, outtype, annotation_classes)
def infotodict(
seqinfo: list[SeqInfo],
) -> dict[tuple[str, tuple[str, ...], None], list[str]]:
"""Heuristic evaluator for determining which runs belong where
allowed template fields - follow python string module:
item: index within category
subject: participant id
seqitem: run number during scanning
subindex: sub index within group
"""
label_map = {
"movie": "movielocalizer",
"retmap": "retmap",
"visloc": "objectcategories",
}
info: dict[tuple[str, tuple[str, ...], None], list[str]] = {}
for s in seqinfo:
if "EPI_3mm" not in s.protocol_name:
continue
label = s.protocol_name.split("_")[2].split()[0].strip("1234567890").lower()
if label in ("movie", "retmap", "visloc"):
key = create_key(
"ses-localizer/func/{subject}_ses-localizer_task-%s_run-{item:01d}_bold"
% label_map[label]
)
elif label == "sense":
# pilot retmap had different description
key = create_key(
"ses-localizer/func/{subject}_ses-localizer_task-retmap_run-{item:01d}_bold"
)
elif label == "r":
key = create_key(
"ses-movie/func/{subject}_ses-movie_task-movie_run-%i_bold"
% int(s.protocol_name.split("_")[2].split()[0][-1])
)
else:
raise RuntimeError("YOU SHALL NOT PASS!")
if key not in info:
info[key] = []
info[key].append(s.series_id)
return info
|
d073be17373f1ca4b0caa4905379392de10327c4
|
93134d8429cc7c5251ea76e19bf1856466bd2b48
|
/trajnetbaselines/classical/orca.py
|
0eb3d97bedce3cf0d3106982c0930bd8d1b9f336
|
[
"MIT"
] |
permissive
|
vita-epfl/trajnetplusplusbaselines
|
0bd7ce75740f3ed39ba82d1bfaa5d3c279da474f
|
99a6e9d8675face1aeeb17227b73dd3d1267f463
|
refs/heads/master
| 2023-04-14T23:21:54.217111
| 2022-10-04T09:52:01
| 2022-10-04T09:52:01
| 218,013,163
| 221
| 83
|
MIT
| 2023-04-04T15:07:11
| 2019-10-28T09:42:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,832
|
py
|
orca.py
|
import numpy as np
from scipy.interpolate import interp1d
import rvo2
import trajnetplusplustools
MAX_SPEED_MULTIPLIER = 1.3 # with respect to initial speed
def predict(input_paths, dest_dict=None, dest_type='interp', orca_params=(1.5, 1.5, 0.4),
            predict_all=True, n_predict=12, obs_length=9):
pred_length = n_predict
def init_states(input_paths, sim, start_frame, dest_dict, dest_type):
positions, goals, speed = [], [], []
for i, _ in enumerate(input_paths):
path = input_paths[i]
ped_id = path[0].pedestrian
past_path = [t for t in path if t.frame <= start_frame]
future_path = [t for t in path if t.frame > start_frame]
past_frames = [t.frame for t in path if t.frame <= start_frame]
len_path = len(past_path)
## To consider agent or not consider.
if start_frame in past_frames:
curr = past_path[-1]
## Velocity
if len_path >= 4:
stride = 3
prev = past_path[-4]
else:
stride = len_path - 1
prev = past_path[-len_path]
curr_vel, curr_speed = vel_state(prev, curr, stride)
max_speed = MAX_SPEED_MULTIPLIER * curr_speed
## Destination
if dest_type == 'true':
if dest_dict is not None:
[d_x, d_y] = dest_dict[ped_id]
else:
raise ValueError
elif dest_type == 'interp':
[d_x, d_y] = dest_state(past_path, len_path)
elif dest_type == 'pred_end':
[d_x, d_y] = [future_path[-1].x, future_path[-1].y]
else:
raise NotImplementedError
positions.append((curr.x, curr.y))
speed.append((curr_speed))
goals.append((d_x, d_y))
sim.addAgent((curr.x, curr.y), maxSpeed=max_speed, velocity=tuple(curr_vel))
trajectories = [[positions[i]] for i in range(len(positions))]
return trajectories, positions, goals, speed
def vel_state(prev, curr, stride):
if stride == 0:
return [0, 0], 0
diff = np.array([curr.x - prev.x, curr.y - prev.y])
theta = np.arctan2(diff[1], diff[0])
        # trajnet frames are sampled at 2.5 fps, i.e. 0.4 s apart
        speed = np.linalg.norm(diff) / (stride * 0.4)
return [speed*np.cos(theta), speed*np.sin(theta)], speed
def dest_state(path, length):
if length == 1:
return [path[-1].x, path[-1].y]
x = [t.x for t in path]
y = [t.y for t in path]
time = list(range(length))
f = interp1d(x=time, y=[x, y], fill_value='extrapolate')
return f(time[-1] + pred_length)
multimodal_outputs = {}
primary = input_paths[0]
neighbours_tracks = []
frame_diff = primary[1].frame - primary[0].frame
start_frame = primary[obs_length-1].frame
first_frame = primary[obs_length-1].frame + frame_diff
fps = 20
sampling_rate = fps / 2.5
## orca_params = [nDist, nReact, radius]
## Parameters freq nD obD nR oR rad max.spd
sim = rvo2.PyRVOSimulator(1 / fps, orca_params[0], 10, orca_params[1], 5, orca_params[2], 1.5)
# initialize
trajectories, _, goals, speed = init_states(input_paths, sim, start_frame, dest_dict, dest_type)
num_ped = len(speed)
count = 0
end_range = 0.05
##Simulate a scene
while count < sampling_rate * pred_length + 1:
count += 1
sim.doStep()
for i in range(num_ped):
if count == 1:
trajectories[i].pop(0)
position = sim.getAgentPosition(i)
if count % sampling_rate == 0:
trajectories[i].append(position)
# check if this agent reaches the goal
if np.linalg.norm(np.array(position) - np.array(goals[i])) < end_range:
sim.setAgentPrefVelocity(i, (0, 0))
else:
# Move towards goal
velocity = np.array((goals[i][0] - position[0], goals[i][1] - position[1]))
curr_speed = np.linalg.norm(velocity)
pref_vel = speed[i] * velocity / curr_speed if curr_speed > speed[i] else velocity
sim.setAgentPrefVelocity(i, tuple(pref_vel.tolist()))
states = np.array(trajectories).transpose(1, 0, 2)
# predictions
primary_track = states[:, 0, 0:2]
neighbours_tracks = states[:, 1:, 0:2]
## Primary Prediction Only
if not predict_all:
neighbours_tracks = []
# Unimodal Prediction
multimodal_outputs[0] = primary_track, neighbours_tracks
return multimodal_outputs
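# Hypothetical call sketch (input_paths would normally come from a
# trajnetplusplustools scene reader; loading details omitted):
#
#   outputs = predict(input_paths, dest_type='interp',
#                     orca_params=[1.5, 1.5, 0.4], n_predict=12, obs_length=9)
#   primary_track, neighbour_tracks = outputs[0]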
|
aece7e53fb10b8b9ffd08452321335983a61746a
|
d8810093406a2ba401ac6ea300ed414bfab6b6a0
|
/kraken/ketos/util.py
|
e71b535053c08b1372076b6a6180429000c16b68
|
[
"Apache-2.0"
] |
permissive
|
mittagessen/kraken
|
11b8eeaff5dcfaa62a96b0af73ebe65bc2d9bef2
|
3e966a3e5c881394b882da95fa5941c4305aec43
|
refs/heads/main
| 2023-08-29T05:52:06.301403
| 2023-08-17T11:25:56
| 2023-08-17T11:25:56
| 35,872,353
| 565
| 140
|
Apache-2.0
| 2023-09-08T10:28:33
| 2015-05-19T09:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
util.py
|
#
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.util
~~~~~~~~~~~~~~~~~~~~
Command line driver helpers
"""
import os
import glob
import click
import logging
from typing import List, Tuple, Union
logging.captureWarnings(True)
logger = logging.getLogger('kraken')
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
def message(msg, **styles):
if logger.getEffectiveLevel() >= 30:
click.secho(msg, **styles)
def to_ptl_device(device: str) -> Tuple[str, Union[str, List[int]]]:
if any([device == x for x in ['cpu', 'mps']]):
return device, 'auto'
elif any([device.startswith(x) for x in ['tpu', 'cuda', 'hpu', 'ipu']]):
dev, idx = device.split(':')
if dev == 'cuda':
dev = 'gpu'
return dev, [int(idx)]
    raise ValueError(f'Invalid device {device} specified')
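# Behaviour sketch for to_ptl_device (the mappings follow directly from the
# branches above):
#
#   to_ptl_device('cpu')     # -> ('cpu', 'auto')
#   to_ptl_device('cuda:0')  # -> ('gpu', [0])
#   to_ptl_device('tpu:1')   # -> ('tpu', [1])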
|
8029110e4858b804d6748afd822d2d620db3386a
|
9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b
|
/tests/snapshot_tests/snapshot_apps/fr_margins.py
|
204e6154f8a8cf31f99a03bdc60dcafd03abd988
|
[
"MIT"
] |
permissive
|
Textualize/textual
|
b8cf4b5d18069fccc7623b3116436f479e1ef446
|
b74ac1e47fdd16133ca567390c99ea19de278c5a
|
refs/heads/main
| 2023-08-30T21:40:21.563823
| 2023-08-30T10:18:27
| 2023-08-30T10:18:27
| 355,959,597
| 14,818
| 588
|
MIT
| 2023-09-14T20:22:02
| 2021-04-08T15:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
fr_margins.py
|
from textual.app import App, ComposeResult
from textual.widgets import Label
from textual.containers import Container
# Test fr dimensions and margins work in an auto container
# https://github.com/Textualize/textual/issues/2220
class TestApp(App):
CSS = """
Container {
background: green 20%;
border: heavy green;
width: auto;
height: auto;
overflow: hidden;
}
Label {
background: green 20%;
width: 1fr;
height: 1fr;
margin: 2 2;
}
"""
def compose(self) -> ComposeResult:
with Container():
yield Label("Hello")
yield Label("World")
yield Label("!!")
if __name__ == "__main__":
app = TestApp()
app.run()
|
ebff52eba75860e34806d305cc443092f9f95907
|
4fafaf50c41b1ad99849248f9872a199f7df2d0d
|
/git_code_debt/metrics/curse_words.py
|
cdc97b163b4e145298df40d32b54e4c76867aed8
|
[
"MIT"
] |
permissive
|
asottile/git-code-debt
|
3dd05a58a19f8d55ea8244f6eaf6d8d6ecb99f33
|
16b60f43dd6ee22d9fe074541d2418411dda03c4
|
refs/heads/main
| 2023-08-11T18:28:12.813666
| 2023-08-01T15:55:52
| 2023-08-01T15:55:52
| 14,399,837
| 287
| 18
|
MIT
| 2023-09-05T13:06:16
| 2013-11-14T16:05:41
|
Python
|
UTF-8
|
Python
| false
| false
| 5,438
|
py
|
curse_words.py
|
from __future__ import annotations
word_list = {b'4r5e', b'5h1t', b'5hit', b'a55', b'anal', b'anus', b'ar5e', b'arrse', b'arse', b'ass', b'ass-fucker', b'asses', b'assfucker', b'assfukka', b'asshole', b'assholes', b'asswhole', b'a_s_s', b'b!tch', b'b00bs', b'b17ch', b'b1tch', b'ballbag', b'balls', b'ballsack', b'bastard', b'beastial', b'beastiality', b'bellend', b'bestial', b'bestiality', b'bi+ch', b'biatch', b'bitch', b'bitcher', b'bitchers', b'bitches', b'bitchin', b'bitching', b'bloody', b'blow job', b'blowjob', b'blowjobs', b'boiolas', b'bollock', b'bollok', b'boner', b'boob', b'boobs', b'booobs', b'boooobs', b'booooobs', b'booooooobs', b'breasts', b'buceta', b'bugger', b'bum', b'bunny fucker', b'butt', b'butthole', b'buttmuch', b'buttplug', b'c0ck', b'c0cksucker', b'carpet muncher', b'cawk', b'chink', b'cipa', b'cl1t', b'clit', b'clitoris', b'clits', b'cnut', b'cock', b'cock-sucker', b'cockface', b'cockhead', b'cockmunch', b'cockmuncher', b'cocks', b'cocksuck', b'cocksucked', b'cocksucker', b'cocksucking', b'cocksucks', b'cocksuka', b'cocksukka', b'cok', b'cokmuncher', b'coksucka', b'coon', b'cox', b'crap', b'cum', b'cummer', b'cumming', b'cums', b'cumshot', b'cunilingus', b'cunillingus', b'cunnilingus', b'cunt', b'cuntlick', b'cuntlicker', b'cuntlicking', b'cunts', b'cyalis', b'cyberfuc', b'cyberfuck', b'cyberfucked', b'cyberfucker', b'cyberfuckers', b'cyberfucking', b'd1ck', b'damn', b'dick', b'dickhead', b'dildo', b'dildos', b'dink', b'dinks', b'dirsa', b'dlck', b'dog-fucker', b'doggin', b'dogging', b'donkeyribber', b'doosh', b'duche', b'dyke', b'ejaculate', b'ejaculated', b'ejaculates', b'ejaculating', b'ejaculatings', b'ejaculation', b'ejakulate', b'f u c k', b'f u c k e r', b'f4nny', b'fag', b'fagging', b'faggitt', b'faggot', b'faggs', b'fagot', b'fagots', b'fags', b'fanny', b'fannyflaps', b'fannyfucker', b'fanyy', b'fatass', b'fcuk', b'fcuker', b'fcuking', b'feck', b'fecker', b'felching', b'fellate', b'fellatio', b'fingerfuck', b'fingerfucked', b'fingerfucker', b'fingerfuckers', b'fingerfucking', b'fingerfucks', b'fistfuck', b'fistfucked', b'fistfucker', b'fistfuckers', b'fistfucking', b'fistfuckings', b'fistfucks', b'flange', b'fook', b'fooker', b'fuck', b'fucka', b'fucked', b'fucker', b'fuckers', b'fuckhead', b'fuckheads', b'fuckin', b'fucking', b'fuckings', b'fuckingshitmotherfucker', b'fuckme', b'fucks', b'fuckwhit', b'fuckwit', b'fudge packer', b'fudgepacker', b'fuk', b'fuker', b'fukker', b'fukkin', b'fuks', b'fukwhit', b'fukwit', b'fux', b'fux0r', b'f_u_c_k', b'gangbang', b'gangbanged', b'gangbangs', b'gaylord', b'gaysex', b'goatse', b'God', b'god-dam', b'god-damned', b'goddamn', b'goddamned', b'hardcoresex', b'hell', b'heshe', b'hoar', b'hoare', b'hoer', b'homo', b'hore', b'horniest', b'horny', b'hotsex', b'jack-off', b'jackoff', b'jap', b'jerk-off', b'jism', b'jiz', b'jizm', b'jizz', b'kawk', b'knob', b'knobead', b'knobed', b'knobend', b'knobhead', b'knobjocky', b'knobjokey', b'kock', b'kondum', b'kondums', b'kum', b'kummer', b'kumming', b'kums', b'kunilingus', b'l3i+ch', b'l3itch', b'labia', b'lmfao', b'lust', b'lusting', b'm0f0', b'm0fo', b'm45terbate', b'ma5terb8', b'ma5terbate', b'masochist', b'master-bate', b'masterb8', b'masterbat*', b'masterbat3', b'masterbate', b'masterbation', b'masterbations', b'masturbate', b'mo-fo', b'mof0', b'mofo', b'mothafuck', b'mothafucka', b'mothafuckas', b'mothafuckaz', b'mothafucked', b'mothafucker', b'mothafuckers', b'mothafuckin', b'mothafucking', b'mothafuckings', b'mothafucks', b'mother fucker', b'motherfuck', b'motherfucked', b'motherfucker', b'motherfuckers', 
b'motherfuckin', b'motherfucking', b'motherfuckings', b'motherfuckka', b'motherfucks', b'muff', b'mutha', b'muthafecker', b'muthafuckker', b'muther', b'mutherfucker', b'n1gga', b'n1gger', b'nazi', b'nigg3r', b'nigg4h', b'nigga', b'niggah', b'niggas', b'niggaz', b'nigger', b'niggers', b'nob', b'nob jokey', b'nobhead', b'nobjocky', b'nobjokey', b'numbnuts', b'nutsack', b'orgasim', b'orgasims', b'orgasm', b'orgasms', b'p0rn', b'pawn', b'pecker', b'penis', b'penisfucker', b'phonesex', b'phuck', b'phuk', b'phuked', b'phuking', b'phukked', b'phukking', b'phuks', b'phuq', b'pigfucker', b'pimpis', b'piss', b'pissed', b'pisser', b'pissers', b'pisses', b'pissflaps', b'pissin', b'pissing', b'pissoff', b'poop', b'porn', b'porno', b'pornography', b'pornos', b'prick', b'pricks', b'pron', b'pube', b'pusse', b'pussi', b'pussies', b'pussy', b'pussys', b'rectum', b'retard', b'rimjaw', b'rimming', b's hit', b's.o.b.', b'sadist', b'schlong', b'screwing', b'scroat', b'scrote', b'scrotum', b'semen', b'sex', b'sh!+', b'sh!t', b'sh1t', b'shag', b'shagger', b'shaggin', b'shagging', b'shemale', b'shi+', b'shit', b'shitdick', b'shite', b'shited', b'shitey', b'shitfuck', b'shitfull', b'shithead', b'shiting', b'shitings', b'shits', b'shitted', b'shitter', b'shitters', b'shitting', b'shittings', b'shitty', b'skank', b'slut', b'sluts', b'smegma', b'smut', b'snatch', b'son-of-a-bitch', b'spac', b'spunk', b's_h_i_t', b't1tt1e5', b't1tties', b'teets', b'teez', b'testical', b'testicle', b'tit', b'titfuck', b'tits', b'titt', b'tittie5', b'tittiefucker', b'titties', b'tittyfuck', b'tittywank', b'titwank', b'tosser', b'turd', b'tw4t', b'twat', b'twathead', b'twatty', b'twunt', b'twunter', b'v14gra', b'v1gra', b'vagina', b'viagra', b'vulva', b'w00se', b'wang', b'wank', b'wanker', b'wanky', b'whoar', b'whore', b'willies', b'willy', b'xrated', b'xxx'} # noqa
|
6eab14306e112f1281d23300eddc302aeb8e3e47
|
e0ed4496e94263643cedea56bfcdec1140ced8d6
|
/neupy/architectures/__init__.py
|
562eae9865fc341065aa497b90baea7b13a0af92
|
[
"MIT"
] |
permissive
|
itdxer/neupy
|
6307666271807bd9028e3e60dd2536a544ed8421
|
317ed4204b5239e8be2b94a95fe3157c5f9edc65
|
refs/heads/master
| 2023-06-13T23:09:36.487633
| 2023-01-03T21:24:56
| 2023-01-03T21:24:56
| 41,323,480
| 840
| 206
|
MIT
| 2022-12-16T16:32:10
| 2015-08-24T19:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
__init__.py
|
from .vgg16 import *
from .vgg19 import *
from .squeezenet import *
from .resnet import *
from .mixture_of_experts import *
|
1f32144404465786d3b76b86fc3961405c15009e
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/FeedCyrenThreatInDepth/Scripts/CyrenThreatInDepthRelatedWidget/CyrenThreatInDepthRelatedWidget.py
|
6f194dac8ed9bb01e65d363e826be48aa3a51d80
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
CyrenThreatInDepthRelatedWidget.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
def cyren_feed_relationship(args) -> CommandResults:
indicator = args.get("indicator")
if not indicator:
raise ValueError("Please provide 'indicator' argument!")
result = demisto.executeCommand("CyrenThreatInDepthRenderRelated", dict(indicator=json.dumps(indicator)))
if is_error(result[0]):
raise ValueError(f"Failed to render related: {str(get_error(result))}")
readable = result[0]["HumanReadable"]
return CommandResults(readable_output=readable)
def main(args):
try:
return_results(cyren_feed_relationship(args))
except Exception as e:
return_error(f"Failed to execute CyrenThreatInDepthRelatedWidget. Error: {str(e)}")
if __name__ in ("__main__", "__builtin__", "builtins"):
main(demisto.args())
|
20e652e97b17fe2d530c3da95a1fa0fa91b0b882
|
8b5d61f17ab2e4c158270cf6dda79f9a47870df1
|
/sknetwork/topology/weisfeiler_lehman.py
|
572b9711a26af5fecfce0573fce6c1f4e6d0d27c
|
[
"BSD-3-Clause"
] |
permissive
|
sknetwork-team/scikit-network
|
55a5ecbbbd2dfc78095aa74f3953c770357cadbb
|
95cec38d56b086b95616d2f1d13a9b98c6c8b534
|
refs/heads/master
| 2023-09-03T21:56:42.345214
| 2023-05-22T14:12:57
| 2023-05-22T14:12:57
| 135,287,970
| 581
| 73
|
NOASSERTION
| 2023-07-21T05:42:25
| 2018-05-29T11:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,308
|
py
|
weisfeiler_lehman.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created in July 2020
@author: Pierre Pebereau <pierre.pebereau@telecom-paris.fr>
@author: Alexis Barreaux <alexis.barreaux@telecom-paris.fr>
"""
from typing import Union
import numpy as np
from scipy import sparse
from sknetwork.topology.weisfeiler_lehman_core import weisfeiler_lehman_coloring
from sknetwork.utils.check import check_format, check_square
def color_weisfeiler_lehman(adjacency: Union[sparse.csr_matrix, np.ndarray], max_iter: int = -1) -> np.ndarray:
"""Color nodes using Weisfeiler-Lehman algorithm.
Parameters
----------
adjacency : sparse.csr_matrix
Adjacency matrix of the graph
max_iter : int
Maximum number of iterations. Negative value means no limit (until convergence).
Returns
-------
labels : np.ndarray
Label of each node.
Example
-------
>>> from sknetwork.data import house
>>> adjacency = house()
>>> labels = color_weisfeiler_lehman(adjacency)
>>> print(labels)
[0 2 1 1 2]
References
----------
* Douglas, B. L. (2011).
`The Weisfeiler-Lehman Method and Graph Isomorphism Testing.
<https://arxiv.org/pdf/1101.5211.pdf>`_
* Shervashidze, N., Schweitzer, P., van Leeuwen, E. J., Melhorn, K., Borgwardt, K. M. (2011)
`Weisfeiler-Lehman graph kernels.
<https://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf>`_
Journal of Machine Learning Research 12, 2011.
"""
adjacency = check_format(adjacency, allow_empty=True)
check_square(adjacency)
n_nodes = adjacency.shape[0]
if max_iter < 0 or max_iter > n_nodes:
max_iter = n_nodes
labels = np.zeros(n_nodes, dtype=np.int32)
powers = (-np.pi / 3.15) ** np.arange(n_nodes, dtype=np.double)
indptr = adjacency.indptr
indices = adjacency.indices
labels, _ = weisfeiler_lehman_coloring(indptr, indices, labels, powers, max_iter)
return np.array(labels)
def are_isomorphic(adjacency1: sparse.csr_matrix, adjacency2: sparse.csr_matrix, max_iter: int = -1) -> bool:
"""Weisfeiler-Lehman isomorphism test. If the test is False, the graphs cannot be isomorphic.
Parameters
-----------
adjacency1 :
First adjacency matrix.
adjacency2 :
Second adjacency matrix.
max_iter : int
Maximum number of iterations. Negative value means no limit (until convergence).
Returns
-------
test_result : bool
Example
-------
>>> from sknetwork.data import house, bow_tie
>>> are_isomorphic(house(), bow_tie())
False
References
----------
* Douglas, B. L. (2011).
`The Weisfeiler-Lehman Method and Graph Isomorphism Testing.
<https://arxiv.org/pdf/1101.5211.pdf>`_
* Shervashidze, N., Schweitzer, P., van Leeuwen, E. J., Melhorn, K., Borgwardt, K. M. (2011)
`Weisfeiler-Lehman graph kernels.
<https://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf>`_
Journal of Machine Learning Research 12, 2011.
"""
adjacency1 = check_format(adjacency1)
check_square(adjacency1)
adjacency2 = check_format(adjacency2)
check_square(adjacency2)
if (adjacency1.shape != adjacency2.shape) or (adjacency1.nnz != adjacency2.nnz):
return False
n_nodes = adjacency1.shape[0]
if max_iter < 0 or max_iter > n_nodes:
max_iter = n_nodes
indptr1 = adjacency1.indptr
indptr2 = adjacency2.indptr
indices1 = adjacency1.indices
indices2 = adjacency2.indices
labels1 = np.zeros(n_nodes, dtype=np.int32)
labels2 = np.zeros(n_nodes, dtype=np.int32)
powers = (-np.pi / 3.15) ** np.arange(n_nodes, dtype=np.double)
iteration = 0
has_changed1, has_changed2 = True, True
while iteration < max_iter and (has_changed1 or has_changed2):
labels1, has_changed1 = weisfeiler_lehman_coloring(indptr1, indices1, labels1, powers, max_iter=1)
labels2, has_changed2 = weisfeiler_lehman_coloring(indptr2, indices2, labels2, powers, max_iter=1)
_, counts1 = np.unique(np.array(labels1), return_counts=True)
_, counts2 = np.unique(np.array(labels2), return_counts=True)
if (counts1 != counts2).any():
return False
iteration += 1
return True
|
55f5f4d2847e72f4a82394780c8d41c74689b0dd
|
a3e2d421f94a8adf2c41ff1d093b5a06de1448d6
|
/product/runtime/src/main/python/java/_vendor/elftools/dwarf/abbrevtable.py
|
6d29d5cfe8ca3fc9a69efe295c50453e0f4ced03
|
[
"MIT"
] |
permissive
|
chaquo/chaquopy
|
09ef057015a756ce9b862732477b2549562720b4
|
e09bbe6ca5efd859d484b01e30131ccc944aa2b6
|
refs/heads/master
| 2023-08-31T22:09:22.230601
| 2023-08-31T13:07:57
| 2023-08-31T13:07:57
| 95,140,462
| 607
| 121
|
MIT
| 2023-09-13T19:17:29
| 2017-06-22T17:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
abbrevtable.py
|
#-------------------------------------------------------------------------------
# elftools: dwarf/abbrevtable.py
#
# DWARF abbreviation table
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from ..common.utils import struct_parse, dwarf_assert
class AbbrevTable(object):
""" Represents a DWARF abbreviation table.
"""
def __init__(self, structs, stream, offset):
""" Create new abbreviation table. Parses the actual table from the
stream and stores it internally.
structs:
A DWARFStructs instance for parsing the data
stream, offset:
The stream and offset into the stream where this abbreviation
table lives.
"""
self.structs = structs
self.stream = stream
self.offset = offset
self._abbrev_map = self._parse_abbrev_table()
def get_abbrev(self, code):
""" Get the AbbrevDecl for a given code. Raise KeyError if no
declaration for this code exists.
"""
return self._abbrev_map[code]
def _parse_abbrev_table(self):
""" Parse the abbrev table from the stream
"""
        abbrev_map = {}
self.stream.seek(self.offset)
while True:
decl_code = struct_parse(
struct=self.structs.Dwarf_uleb128(''),
stream=self.stream)
if decl_code == 0:
break
declaration = struct_parse(
struct=self.structs.Dwarf_abbrev_declaration,
stream=self.stream)
            abbrev_map[decl_code] = AbbrevDecl(decl_code, declaration)
        return abbrev_map
class AbbrevDecl(object):
""" Wraps a parsed abbreviation declaration, exposing its fields with
dict-like access, and adding some convenience methods.
The abbreviation declaration represents an "entry" that points to it.
"""
def __init__(self, code, decl):
self.code = code
self.decl = decl
def has_children(self):
""" Does the entry have children?
"""
return self['children_flag'] == 'DW_CHILDREN_yes'
def iter_attr_specs(self):
""" Iterate over the attribute specifications for the entry. Yield
(name, form) pairs.
"""
for attr_spec in self['attr_spec']:
yield attr_spec.name, attr_spec.form
def __getitem__(self, entry):
return self.decl[entry]
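# Hypothetical usage sketch (assumes `structs` is a DWARFStructs instance and
# `stream` holds a .debug_abbrev section starting at offset 0):
#
#   table = AbbrevTable(structs=structs, stream=stream, offset=0)
#   decl = table.get_abbrev(1)   # raises KeyError if code 1 is absent
#   if decl.has_children():
#       for name, form in decl.iter_attr_specs():
#           print(name, form)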
|
b67307bebb573f31df58371dd2f6d3736263c88c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/L1TNtuples/python/RelValInputFiles.py
|
0081ce9db035a9d5edc89016f79890730952381d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 692
|
py
|
RelValInputFiles.py
|
# these files make it possible to test the workflows starting from different event content
# AOD
def RelValInputFile_AOD():
return '/store/relval/CMSSW_7_5_0_pre1/RelValProdTTbar_13/AODSIM/MCRUN2_74_V7-v1/00000/48159643-5EE3-E411-818F-0025905A48F0.root'
# DIGI (only available in RelVal, not a realistic workflow)
def RelValInputFile_DIGI():
return ''
# RAW (need to run RawToDigi to use this)
def RelValInputFile_RAW():
return '/store/relval/CMSSW_7_5_0_pre4/RelValProdTTbar_13/GEN-SIM-RAW/MCRUN2_75_V1-v1/00000/1CFADAF5-E1F5-E411-A406-0025905A60D6.root'
#'/store/relval/CMSSW_7_5_0_pre1/RelValProdTTbar_13/GEN-SIM-RAW/MCRUN2_74_V7-v1/00000/0CEB1526-6CE3-E411-82B6-00261894386C.root'
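# Hypothetical usage sketch inside a cmsRun configuration (assumes an existing
# `process` object and the standard FWCore import):
#
#   import FWCore.ParameterSet.Config as cms
#   process.source = cms.Source("PoolSource",
#       fileNames=cms.untracked.vstring(RelValInputFile_RAW()))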
|
db0107fd6facf1518e9f559c34fd4ffe70a9f46c
|
30a03b509ed189f6a0e0574883a255774965e93e
|
/flightrl/setup.py
|
1f6a44e29e791c70458ffd587dac2abd3d35ad53
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uzh-rpg/flightmare
|
efdf34f546348b870307e4c375b0bf82fbf256d2
|
d4218aedac18cbe9364a0a0df10ab992c4b65e4f
|
refs/heads/master
| 2023-08-13T17:25:46.427864
| 2023-05-15T08:41:51
| 2023-05-15T08:41:51
| 279,581,575
| 811
| 346
|
NOASSERTION
| 2023-04-18T04:30:00
| 2020-07-14T12:40:25
|
C++
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
setup.py
|
from setuptools import setup
setup(
name='rpg_baselines',
version='0.0.1',
author='Yunlong Song',
author_email='song@ifi.uzh.ch',
description='Flightmare: A Quadrotor Simulator.',
long_description='',
install_requires=['gym==0.11', 'ruamel.yaml',
'numpy', 'stable_baselines==2.10.1'],
packages=['rpg_baselines'],
)
|
b07f3f7d973b2493f8fe0813e276cb13a996cd41
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-codehub/huaweicloudsdkcodehub/v3/model/add_deploy_key_request_body.py
|
b7386de5309f296c789781b00cb93f0c8fd734aa
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,522
|
py
|
add_deploy_key_request_body.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AddDeployKeyRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'application': 'str',
'can_push': 'bool',
'key': 'str',
'key_title': 'str'
}
attribute_map = {
'application': 'application',
'can_push': 'can_push',
'key': 'key',
'key_title': 'key_title'
}
def __init__(self, application=None, can_push=None, key=None, key_title=None):
"""AddDeployKeyRequestBody
The model defined in huaweicloud sdk
        :param application: Source of the SSH key used for deployment
        :type application: str
        :param can_push: Whether the SSH key used for deployment can push code
        :type can_push: bool
        :param key: SSH key used for deployment
        :type key: str
        :param key_title: Name of the SSH key used for deployment
        :type key_title: str
"""
self._application = None
self._can_push = None
self._key = None
self._key_title = None
self.discriminator = None
self.application = application
self.can_push = can_push
self.key = key
self.key_title = key_title
@property
def application(self):
"""Gets the application of this AddDeployKeyRequestBody.
        Source of the SSH key used for deployment
:return: The application of this AddDeployKeyRequestBody.
:rtype: str
"""
return self._application
@application.setter
def application(self, application):
"""Sets the application of this AddDeployKeyRequestBody.
        Source of the SSH key used for deployment
:param application: The application of this AddDeployKeyRequestBody.
:type application: str
"""
self._application = application
@property
def can_push(self):
"""Gets the can_push of this AddDeployKeyRequestBody.
        Whether the SSH key used for deployment can push code
:return: The can_push of this AddDeployKeyRequestBody.
:rtype: bool
"""
return self._can_push
@can_push.setter
def can_push(self, can_push):
"""Sets the can_push of this AddDeployKeyRequestBody.
        Whether the SSH key used for deployment can push code
:param can_push: The can_push of this AddDeployKeyRequestBody.
:type can_push: bool
"""
self._can_push = can_push
@property
def key(self):
"""Gets the key of this AddDeployKeyRequestBody.
        SSH key used for deployment
:return: The key of this AddDeployKeyRequestBody.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this AddDeployKeyRequestBody.
        SSH key used for deployment
:param key: The key of this AddDeployKeyRequestBody.
:type key: str
"""
self._key = key
@property
def key_title(self):
"""Gets the key_title of this AddDeployKeyRequestBody.
        Name of the SSH key used for deployment
:return: The key_title of this AddDeployKeyRequestBody.
:rtype: str
"""
return self._key_title
@key_title.setter
def key_title(self, key_title):
"""Sets the key_title of this AddDeployKeyRequestBody.
        Name of the SSH key used for deployment
:param key_title: The key_title of this AddDeployKeyRequestBody.
:type key_title: str
"""
self._key_title = key_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddDeployKeyRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
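# Minimal usage sketch (the key material below is a placeholder, not a real key):
#
#   body = AddDeployKeyRequestBody(
#       application='codehub',
#       can_push=False,
#       key='ssh-rsa AAAA...',
#       key_title='ci-deploy-key',
#   )
#   print(body.to_dict())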
|
a770833a5c93ca1624392d0cee5bbbcefcbb1d9c
|
2a64017ddbf28d00109ed4e3105e1f7ccc78ccc1
|
/forms101/forms/contact_form.py
|
47531f8a2cb3f5de020348f2360b61b221853c0b
|
[] |
no_license
|
viewflow/cookbook
|
25f9379510ced08dbdbae9d5cfb3df0c68f95a08
|
04189977cd894b5166d088949f30e07189889189
|
refs/heads/main
| 2023-08-17T09:22:26.226335
| 2023-08-11T15:12:23
| 2023-08-11T15:12:23
| 35,273,541
| 286
| 179
| null | 2023-01-19T10:44:18
| 2015-05-08T10:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 813
|
py
|
contact_form.py
|
from django import forms
from viewflow.forms import Layout, Row
from . import Form
class ContactForm(Form):
name = forms.CharField(
widget=forms.TextInput(
attrs={"leading-icon": "account_box"},
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={"leading-icon": "email"},
),
)
subject = forms.CharField(
widget=forms.TextInput(
attrs={"leading-icon": "announcement"},
)
)
message = forms.CharField(
widget=forms.Textarea(attrs={"rows": 5}),
)
send_copy = forms.BooleanField(
required=False,
label="Send a copy to my e-mail address",
)
layout = Layout(
Row("name", "email"),
"subject",
"message",
"send_copy",
)
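# Minimal usage sketch (assumes the viewflow Form base validates like a standard
# django forms.Form; field values are illustrative):
#
#   form = ContactForm(data={
#       "name": "Ada Lovelace",
#       "email": "ada@example.com",
#       "subject": "Hello",
#       "message": "Just saying hi.",
#       "send_copy": True,
#   })
#   assert form.is_valid()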
|
1daf59aa1bcdd15bb083f50f192ac5e94f933182
|
67c0bc2b2292857fcc19b3c6e6da5570dc09749c
|
/chapter_4_modeling/train_audioregression.py
|
3af115a0b6fc0c7cb74f82b968d73331cc7bf244
|
[
"Apache-2.0"
] |
permissive
|
jim-schwoebel/voicebook
|
9d28f638fa6a31cb8c4915f9871c07da261b3ea6
|
0e8eae0f01487f15589c0daa2cf7ca3c6f3b8ad3
|
refs/heads/master
| 2022-12-11T13:41:24.005431
| 2021-04-15T13:51:35
| 2021-04-15T13:51:35
| 137,778,789
| 363
| 84
|
Apache-2.0
| 2022-12-08T03:58:01
| 2018-06-18T16:37:37
|
Python
|
UTF-8
|
Python
| false
| false
| 32,618
|
py
|
train_audioregression.py
|
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## TRAIN_AUDIOREGRESSION.PY ##
================================================
Script that overviews implementation of various regression algorithms.
In this way, you can more easily get started on regression projects.
Goes through examples of:
1) linear regression
2) ridge regression
3) lasso regression
4) multi-task lasso
5) elastic net
6) multi-task elastic net
7) least-angle regression (LARS)
8) LARS lasso
9) orthogonal matching pursuit
10) bayesian ridge regression
11) automatic relevance determination
12) logistic regression
13) stochastic gradient descent
14) perceptron algorithms
15) passive-aggressive algorithms
16) RANSAC
17) Theil-Sen
18) Huber Regression
19) Polynomial regression
These are the metrics for linear regression
# metrics.explained_variance_score(y_true, y_pred) Explained variance regression score function
# metrics.mean_absolute_error(y_true, y_pred) Mean absolute error regression loss
# metrics.mean_squared_error(y_true, y_pred[, …]) Mean squared error regression loss
# metrics.mean_squared_log_error(y_true, y_pred) Mean squared logarithmic error regression loss
# metrics.median_absolute_error(y_true, y_pred) Median absolute error regression loss
# metrics.r2_score(y_true, y_pred[, …]) R^2 (coefficient of determination) regression score function.
For more information about regression, feel free to read the
Scikit-learn linear model documentation here:
http://scikit-learn.org/stable/modules/linear_model.html
'''
##################################################
##              IMPORT STATEMENTS               ##
##################################################
import os, json, xlsxwriter, pickle, shutil
from sklearn import linear_model
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Perceptron
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import numpy as np
from beautifultable import BeautifulTable
# helper function
# eliminates redundant code; note that it reads the module-level
# y_test and predictions variables set before each call
def update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores):
    try:
        explained_variances.append(metrics.explained_variance_score(y_test,predictions))
    except:
        explained_variances.append('n/a')
    try:
        mean_absolute_errors.append(metrics.mean_absolute_error(y_test,predictions))
    except:
        mean_absolute_errors.append('n/a')
    try:
        mean_squared_errors.append(metrics.mean_squared_error(y_test,predictions))
    except:
        mean_squared_errors.append('n/a')
    try:
        mean_squared_log_errors.append(metrics.mean_squared_log_error(y_test,predictions))
    except:
        mean_squared_log_errors.append('n/a')
    try:
        median_absolute_errors.append(metrics.median_absolute_error(y_test,predictions))
    except:
        median_absolute_errors.append('n/a')
    try:
        r2_scores.append(metrics.r2_score(y_test,predictions))
    except:
        r2_scores.append('n/a')
    return explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores
##################################################
##################################################
## MAIN CODE BASE ##
##################################################
##################################################
import warnings
# numpy issues lots of warnings, so we can suppress them
warnings.filterwarnings("ignore")
# model dir
modeldir=os.getcwd()+'/models'
# load data
os.chdir(os.getcwd()+'/data')
name=input('what is the name of the file in /data directory you would like to analyze? \n')
i1=name.find('.json')
first=name[0:i1]
# assume binary classification
i2=first.find('_')
one=first[0:i2]
two=first[i2+1:]
g=json.load(open(name))
# get data
aa=g[one]
co=g[two]
# prepare data into train and test tests
# take first 104 features
labels=list()
data=list()
for i in range(len(aa)):
data.append(np.array(aa[i]))
labels.append(float(0))
for i in range(len(co)):
data.append(np.array(co[i]))
labels.append(float(1))
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.33, random_state=42)
# 199 = len(X_train)
# 99 = len(X_test)
# metrics
modeltypes=list()
explained_variances=list()
mean_absolute_errors=list()
mean_squared_errors=list()
mean_squared_log_errors=list()
median_absolute_errors=list()
r2_scores=list()
os.chdir(modeldir)
# make a temp folder to dump files into
foldername=one+'_'+two+'_regression'
tempdir=os.getcwd()+'/'+foldername
try:
os.mkdir(foldername)
os.chdir(foldername)
except:
shutil.rmtree(foldername)
os.mkdir(foldername)
os.chdir(foldername)
##################################################
## linear regression ##
##################################################
'''
LinearRegression fits a linear model with coefficients w = (w_1, ..., w_p)
to minimize the residual sum of squares between the observed responses
in the dataset, and the responses predicted by the linear approximation.
Example:
http://scikit-learn.org/stable/modules/linear_model.html
'''
try:
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
predictions = cross_val_predict(ols, X_test, y_test, cv=6)
f=open('ols.pickle','wb')
pickle.dump(ols,f)
f.close()
except:
print('error - ORDINARY LEAST SQUARES')
# get stats
modeltypes.append('linear regression')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Ridge regression ##
##################################################
'''
Ridge regression addresses some of the problems of
Ordinary Least Squares by imposing a penalty on the
size of coefficients.
The ridge coefficients minimize a penalized residual sum of squares.
Example:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge
'''
try:
ridge = linear_model.Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X_train, y_train)
predictions = cross_val_predict(ridge, X_test, y_test, cv=6)
f=open('ridge.pickle','wb')
pickle.dump(ridge,f)
f.close()
except:
print('error - RIDGE REGRESSION')
# get stats
modeltypes.append('ridge regression')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## LASSO ##
##################################################
'''
The Lasso is a linear model that estimates sparse coefficients.
It is useful in some contexts due to its tendency to prefer solutions
with fewer parameter values, effectively reducing the number of
variables upon which the given solution is dependent.
For this reason, the Lasso and its variants are fundamental
to the field of compressed sensing. Under certain conditions,
it can recover the exact set of non-zero weights
(see Compressive sensing: tomography reconstruction with L1 prior (Lasso)).
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_model_selection.html#sphx-glr-auto-examples-linear-model-plot-lasso-model-selection-py
'''
try:
lasso = linear_model.Lasso(alpha = 0.1)
lasso.fit(X_train, y_train)
predictions = cross_val_predict(lasso, X_test, y_test, cv=6)
f=open('lasso.pickle','wb')
pickle.dump(lasso,f)
f.close()
except:
print('error - LASSO')
# get stats
modeltypes.append('LASSO')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Multi-task LASSO ##
##################################################
'''
The MultiTaskLasso is a linear model that estimates
sparse coefficients for multiple regression problems
jointly: y is a 2D array, of shape (n_samples, n_tasks).
The constraint is that the selected features are the same
for all the regression problems, also called tasks.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_multi_task_lasso_support.html#sphx-glr-auto-examples-linear-model-plot-multi-task-lasso-support-py
'''
# # ONLY WORKS ON y_train that is multidimensional (one hot encoded)
# # Generate some 2D coefficients with sine waves with random frequency and phase
# mlasso = linear_model.MultiTaskLasso(alpha=0.1)
# mlasso.fit(X_train, y_train)
# predictions = cross_val_predict(mlasso, X_test, y_test, cv=6)
# accuracy = metrics.r2_score(y_test, predictions)
##################################################
## Elastic net ##
##################################################
'''
ElasticNet is a linear regression model trained with L1 and L2 prior as regularizer.
This combination allows for learning a sparse model where few of the weights are non-zero
like Lasso, while still maintaining the regularization properties of Ridge.
We control the convex combination of L1 and L2 using the l1_ratio parameter.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_and_elasticnet.html#sphx-glr-auto-examples-linear-model-plot-lasso-and-elasticnet-py
'''
# need training data
try:
enet = linear_model.ElasticNet()
enet.fit(X_train, y_train)
predictions = cross_val_predict(enet, X_test, y_test, cv=6)
f=open('enet.pickle','wb')
pickle.dump(enet,f)
f.close()
except:
print('error - ELASTIC NET')
# get stats
modeltypes.append('elastic net')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Multi-task elastic net ##
##################################################
'''
The MultiTaskElasticNet is an elastic-net model that estimates sparse coefficients
for multiple regression problems jointly: Y is a 2D array, of shape (n_samples, n_tasks).
The constraint is that the selected features are the same for all the regression problems,
also called tasks.
Example:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.MultiTaskElasticNet.html
'''
# # # ONLY WORKS ON y_train that is multidimensional (one hot encoded)
# clf = linear_model.MultiTaskElasticNet()
# clf.fit(X_train, y_train)
# #print(clf.coef_)
# #print(clf.intercept_)
##################################################
## Least angle regression (LARS) ##
##################################################
'''
The advantages of LARS are:
-> It is numerically efficient in contexts where p >> n (i.e., when the number of dimensions is significantly greater than the number of points)
-> It is computationally just as fast as forward selection and has the same order of complexity as an ordinary least squares.
-> It produces a full piecewise linear solution path, which is useful in cross-validation or similar attempts to tune the model.
-> If two variables are almost equally correlated with the response, then their coefficients should increase at approximately the same rate. The algorithm thus behaves as intuition would expect, and also is more stable.
-> It is easily modified to produce solutions for other estimators, like the Lasso.
The disadvantages of the LARS method include:
-> Because LARS is based upon an iterative refitting of the residuals, it would appear to be especially sensitive to the effects of noise.
Example:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lars.html
'''
try:
lars = linear_model.Lars(n_nonzero_coefs=1)
lars.fit(X_train, y_train)
predictions = cross_val_predict(lars, X_test, y_test, cv=6)
f=open('lars.pickle','wb')
pickle.dump(lars,f)
f.close()
except:
print('error - LARS')
# get stats
modeltypes.append('Least angle regression (LARS)')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## LARS LASSO ##
##################################################
'''
LassoLars is a lasso model implemented using the LARS algorithm,
and unlike the implementation based on coordinate_descent,
this yields the exact solution, which is piecewise linear
as a function of the norm of its coefficients.
Example:
http://scikit-learn.org/stable/modules/linear_model.html#passive-aggressive-algorithms
'''
try:
lars_lasso = linear_model.LassoLars()
lars_lasso.fit(X_train, y_train)
predictions = cross_val_predict(lars_lasso, X_test, y_test, cv=6)
f=open('lars_lasso.pickle','wb')
pickle.dump(lars_lasso,f)
f.close()
except:
print('error - LARS LASSO')
# get stats
modeltypes.append('LARS lasso')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Orthogonal Matching Pursuit (OMP) ##
##################################################
'''
OrthogonalMatchingPursuit and orthogonal_mp implements the OMP
algorithm for approximating the fit of a linear model with
constraints imposed on the number of non-zero coefficients (i.e. the L0 pseudo-norm).
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_omp.html#sphx-glr-auto-examples-linear-model-plot-omp-py
'''
try:
omp = linear_model.OrthogonalMatchingPursuit()
omp.fit(X_train, y_train)
predictions = cross_val_predict(omp, X_test, y_test, cv=6)
f=open('omp.pickle','wb')
pickle.dump(omp,f)
f.close()
except:
print('error - ORTHOGONAL MATCHING PURSUIT (OMP)')
# get stats
modeltypes.append('orthogonal matching pursuit (OMP)')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Bayesian ridge regression ##
##################################################
'''
The advantages of Bayesian Regression are:
-> It adapts to the data at hand.
-> It can be used to include regularization parameters in the estimation procedure.
The disadvantages of Bayesian regression include:
-> Inference of the model can be time consuming.
Example:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html
'''
# MULTI-DIMENSIONAL
# clf = BayesianRidge()
# clf.fit(X_train, y_train)
# predictions = cross_val_predict(clf, X_test, y_test, cv=6)
# accuracy = metrics.r2_score(y_test, predictions)
##################################################
## Automatic relevance determination ##
##################################################
'''
ARDRegression is very similar to Bayesian Ridge Regression,
but can lead to sparser weights w [1] [2]. ARDRegression poses
a different prior over w, by dropping the assumption of
the Gaussian being spherical.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_ard.html#sphx-glr-auto-examples-linear-model-plot-ard-py
'''
# MULTI-DIMENSIONAL
# clf = ARDRegression(compute_score=True)
# clf.fit(X_train, y_train)
# predictions = cross_val_predict(clf, X_test, y_test, cv=6)
# accuracy = metrics.r2_score(y_test, predictions)
##################################################
## Logistic regression ##
##################################################
'''
Logistic regression, despite its name, is a linear model
for classification rather than regression. Logistic regression
is also known in the literature as logit regression,
maximum-entropy classification (MaxEnt) or the log-linear classifier.
In this model, the probabilities describing the possible outcomes
of a single trial are modeled using a logistic function.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_logistic_l1_l2_sparsity.html#sphx-glr-auto-examples-linear-model-plot-logistic-l1-l2-sparsity-py
'''
try:
lr = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
lr.fit(X_train, y_train)
predictions = cross_val_predict(lr, X_test, y_test, cv=6)
f=open('lr.pickle','wb')
pickle.dump(lr,f)
f.close()
except:
print('error - LOGISTIC REGRESSION')
# get stats
modeltypes.append('logistic regression')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Stochastic gradient descent (SGD) ##
##################################################
'''
Stochastic gradient descent is a simple yet very efficient
approach to fit linear models. It is particularly useful
when the number of samples (and the number of features) is very large.
The partial_fit method allows online/out-of-core learning.
The classes SGDClassifier and SGDRegressor provide functionality
to fit linear models for classification and regression using
different (convex) loss functions and different penalties.
E.g., with loss="log", SGDClassifier fits a logistic regression model,
while with loss="hinge" it fits a linear support vector machine (SVM).
Example:
http://scikit-learn.org/stable/modules/sgd.html#sgd
'''
try:
# note you have to scale the data, as SGD algorithms are sensitive to
# feature scaling
scaler = StandardScaler()
scaler.fit(X_train)
X_train_2 = scaler.transform(X_train)
X_test_2 = scaler.transform(X_test)
sgd = linear_model.SGDRegressor()
sgd.fit(X_train_2, y_train)
predictions = cross_val_predict(sgd, X_test_2, y_test, cv=6)
f=open('sgd.pickle','wb')
pickle.dump(sgd,f)
f.close()
except:
print('error - STOCHASTIC GRADIENT DESCENT')
# get stats
modeltypes.append('stochastic gradient descent (SGD)')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Perceptron algorithms ##
##################################################
'''
Multi-layer Perceptron is sensitive to feature scaling,
so it is highly recommended to scale your data.
For example, scale each attribute on the input vector X to [0, 1] or [-1, +1],
or standardize it to have mean 0 and variance 1.
Note that you must apply the same scaling to the test
set for meaningful results. You can use StandardScaler for standardization.
Change the solver to 'lbfgs'. The default 'adam' is an SGD-like method,
which is effective for large & messy data but pretty useless for this kind of smooth & small data.
Example:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PassiveAggressiveRegressor.html#sklearn.linear_model.PassiveAggressiveRegressor
'''
try:
    # scale features as recommended above (mirrors the SGD section)
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train_2 = scaler.transform(X_train)
    X_test_2 = scaler.transform(X_test)
    nn = MLPRegressor(solver='lbfgs')
    nn.fit(X_train_2, y_train)
    predictions = cross_val_predict(nn, X_test_2, y_test, cv=6)
    f=open('nn.pickle','wb')
    pickle.dump(nn,f)
    f.close()
except:
    print('error - MLP REGRESSOR')
# get stats
modeltypes.append('perceptron')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
##         Passive-aggressive algorithms        ##
##################################################
'''
The passive-aggressive algorithms are a family of algorithms
for large-scale learning. They are similar to the Perceptron
in that they do not require a learning rate. However,
contrary to the Perceptron, they include a regularization parameter C.
Example:
http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf
'''
try:
pa_regr = linear_model.PassiveAggressiveRegressor(random_state=0)
pa_regr.fit(X_train, y_train)
predictions = cross_val_predict(pa_regr, X_test, y_test, cv=6)
f=open('pa_regr.pickle','wb')
pickle.dump(pa_regr,f)
f.close()
except:
print('error - PASSIVE-AGGRESSIVE')
# get stats
modeltypes.append('passive-aggressive algorithm')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## RANSAC ##
##################################################
'''
When in doubt, use RANSAC
RANSAC (RANdom SAmple Consensus) fits a model from random subsets of
inliers from the complete data set.
RANSAC is a non-deterministic algorithm producing only a reasonable
result with a certain probability, which is dependent on the number
of iterations (see max_trials parameter). It is typically used for
linear and non-linear regression problems and is especially popular
in the fields of photogrammetric computer vision.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_ransac.html#sphx-glr-auto-examples-linear-model-plot-ransac-py
'''
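# Hedged sketch (attribute names per scikit-learn's RANSACRegressor docs;
# only meaningful after the fit below):
#   ransac.inlier_mask_   # boolean mask of the consensus-set inliers
#   ransac.estimator_     # final model refit on those inliers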
try:
ransac = linear_model.RANSACRegressor()
ransac.fit(X_train, y_train)
predictions = cross_val_predict(ransac, X_test, y_test, cv=6)
f=open('ransac.pickle','wb')
pickle.dump(ransac,f)
f.close()
except:
print('error - RANSAC')
# get stats
modeltypes.append('RANSAC')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Theil-SEN ##
##################################################
'''
The TheilSenRegressor estimator uses a generalization of the median
in multiple dimensions. It is thus robust to multivariate outliers.
Note however that the robustness of the estimator decreases quickly
with the dimensionality of the problem. It loses its robustness
properties and becomes no better than ordinary least squares
in high dimensions.
Note: this model takes a bit longer to train.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_theilsen.html#sphx-glr-auto-examples-linear-model-plot-theilsen-py
'''
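# Hedged sketch (illustrative only): training speed and robustness can be
# traded off by subsampling, e.g.
#   linear_model.TheilSenRegressor(n_subsamples=50, random_state=42)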
try:
theilsen=linear_model.TheilSenRegressor(random_state=42)
theilsen.fit(X_train, y_train)
predictions = cross_val_predict(theilsen, X_test, y_test, cv=6)
f=open('theilsen.pickle','wb')
pickle.dump(theilsen,f)
f.close()
except:
print('error - THEILSEN')
# get stats
modeltypes.append('Theil-Sen')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Huber Regression ##
##################################################
'''
The HuberRegressor is different from Ridge because it applies a linear loss
to samples that are classified as outliers. A sample is classified as an
inlier if the absolute error of that sample is less than a certain threshold.
It differs from TheilSenRegressor and RANSACRegressor because it does not
ignore the effect of the outliers but gives them a lesser weight.
Example:
http://scikit-learn.org/stable/auto_examples/linear_model/plot_huber_vs_ridge.html#sphx-glr-auto-examples-linear-model-plot-huber-vs-ridge-py
'''
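# Hedged sketch (constructed for illustration only, never fitted): epsilon
# sets the inlier/outlier threshold mentioned above; 1.35 is scikit-learn's
# default, and larger values behave closer to plain least squares.
_huber_example = linear_model.HuberRegressor(epsilon=1.35)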
try:
huber = linear_model.HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100)
huber.fit(X_train, y_train)
predictions = cross_val_predict(huber, X_test, y_test, cv=6)
f=open('huber.pickle','wb')
pickle.dump(huber,f)
f.close()
except:
print('error - HUBER')
# get stats
modeltypes.append('huber regression')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Polynomial Regression ##
##################################################
'''
One common pattern within machine learning is to use linear models trained on
nonlinear functions of the data. This approach maintains the generally fast
performance of linear methods, while allowing them to fit a much wider range of data.
Example:
http://scikit-learn.org/stable/modules/linear_model.html#polynomial-regression-extending-linear-models-with-basis-functions
'''
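# Hedged sketch of the nonlinear feature expansion used below: with
# degree=2 and include_bias=False, two features [a, b] become
# [a, b, a^2, a*b, b^2], so e.g.
#   PolynomialFeatures(degree=2, include_bias=False).fit_transform([[2., 3.]])
# yields [[2., 3., 4., 6., 9.]].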
try:
poly_lr = Pipeline([
('poly', PolynomialFeatures(degree=5, include_bias=False)),
('linreg', LinearRegression(normalize=True))
])
poly_lr.fit(X_train, y_train)
predictions = cross_val_predict(poly_lr, X_test, y_test, cv=6)
accuracy = metrics.r2_score(y_test, predictions)
f=open('poly_lr.pickle','wb')
pickle.dump(poly_lr,f)
f.close()
except:
print('error - POLYNOMIAL')
# get stats
modeltypes.append('polynomial (linear regression)')
explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores = update_list(explained_variances, mean_absolute_errors, mean_squared_errors, mean_squared_log_errors, median_absolute_errors, r2_scores)
##################################################
## Write session to .JSON ##
##################################################
os.chdir(modeldir)
print('\n\n')
print('RESULTS: \n')
# print table in terminal
table = BeautifulTable()
table.column_headers = ["model type", "R^2 score", "Mean Absolute Errors"]
for i in range(len(modeltypes)):
table.append_row([modeltypes[i], str(r2_scores[i]), str(mean_absolute_errors[i])])
print(table)
filename=name[0:-5]+'.xlsx'
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Model type')
worksheet.write('B1', 'R^2 score')
worksheet.write('C1', 'Explained Variances')
worksheet.write('D1', 'Mean Absolute Errors')
worksheet.write('E1', 'Mean Squared Log Errors')
worksheet.write('F1', 'Median Absolute Errors')
#worksheet.write('G1', 'Mean Squared Errors')
# print the best model in terms of mean absolute error
varnames=['ols.pickle', 'ridge.pickle', 'lasso.pickle', 'enet.pickle', 'lars.pickle',
'lars_lasso.pickle','omp.pickle', 'lr.pickle','sgd.pickle', 'nn.pickle','pa_regr.pickle',
'ransac.pickle', 'theilsen.pickle', 'huber.pickle', 'poly_lr.pickle']
# make every entry numeric; set missing ('n/a') MAE values to 10 (a large number) so they drop out of the comparison
mae=mean_absolute_errors
for i in range(len(mae)):
if mae[i] == 'n/a':
mae[i]=10
else:
mae[i]=float(mae[i])
# get the index of the minimum MAE; then delete the temp folder and put the master file in the models directory
minval=np.amin(mae)
ind=mae.index(minval)
print('%s has the lowest mean absolute error (%s)'%(modeltypes[ind], str(minval)))
# rename file
os.chdir(tempdir)
newname= foldername+'.pickle'
print('saving file to disk (%s)...'%(newname))
os.rename(varnames[ind], newname)
# move to models directory
shutil.copy(os.getcwd()+'/'+newname, modeldir+'/'+newname)
# now delete temp folder
os.chdir(modeldir)
shutil.rmtree(foldername)
# output stats of saved file (for analysis later)
classes=[one,two]
data={
'model name':newname,
'model type':modeltypes[ind],
'stats':{
'explained_variance':explained_variances[ind],
'mean_absolute_error':mean_absolute_errors[ind],
#'mean_squared_error': mean_squared_errors[ind-1],
'mean_squared_log_error':mean_squared_log_errors[ind],
'median_absolute_error':median_absolute_errors[ind],
        'r2_score':r2_scores[ind]
},
'classes':classes,
}
jsonfilename=name[0:-5]+'_regression.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# output spreadsheet of results and open up for analysis
for i in range(len(modeltypes)):
try:
worksheet.write('A'+str(i+2), str(modeltypes[i]))
worksheet.write('B'+str(i+2), str(r2_scores[i]))
worksheet.write('C'+str(i+2), str(explained_variances[i]))
worksheet.write('D'+str(i+2), str(mean_absolute_errors[i]))
worksheet.write('E'+str(i+2), str(mean_squared_log_errors[i]))
worksheet.write('F'+str(i+2), str(median_absolute_errors[i]))
#worksheet.write('G'+str(i+2), str(mean_squared_errors[i]))
except:
pass
workbook.close()
os.system('open %s'%(filename))
|
ae94ca910f88d795aef2db8c7e800cbf1d012b54
|
cb4b3f0b1ebd13d0c448c8267873ab3ff1b7ee1f
|
/flask_appbuilder/fields.py
|
001ed4255348e3f7ec2576947b2225fffe99576f
|
[
"BSD-3-Clause"
] |
permissive
|
dpgaspar/Flask-AppBuilder
|
2681797993a08c324777866d69f30d6a56401c3d
|
74f37e21a3c9c7ca7fb3e56f73759e3eaa2ead6b
|
refs/heads/master
| 2023-09-05T18:45:21.119981
| 2023-08-22T10:58:54
| 2023-08-22T10:58:54
| 14,265,145
| 4,550
| 1,566
|
BSD-3-Clause
| 2023-09-12T04:02:09
| 2013-11-09T21:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 8,785
|
py
|
fields.py
|
from __future__ import unicode_literals
import operator
from wtforms import widgets
from wtforms.fields import Field, SelectField, SelectFieldBase
from wtforms.validators import ValidationError
class AJAXSelectField(Field):
"""
Simple class to convert primary key to ORM objects
for SQLAlchemy and fab normal processing on add and update
This WTF field class is prepared to be used in related views or directly on forms.
:param label: The label to render on form
:param validators: A list of form validators
    :param datamodel: An initialized SQLAInterface with a model
    :param col_name: The column that maps to the model
    :param is_related:
        Whether the model column is a relationship; for a direct
        column, col_name is used with the pk instead
    """
def __init__(
self,
label=None,
validators=None,
datamodel=None,
col_name=None,
is_related=True,
**kwargs
):
super(AJAXSelectField, self).__init__(label, validators, **kwargs)
self.datamodel = datamodel
self.col_name = col_name
self.is_related = is_related
def process_data(self, value):
"""
Process the Python data applied to this field and store the result.
This will be called during form construction by the form's `kwargs` or
`obj` argument.
Converting ORM object to primary key for client form.
:param value: The python object containing the value to process.
"""
if value:
if self.is_related:
self.data = self.datamodel.get_related_interface(
self.col_name
).get_pk_value(value)
else:
self.data = self.datamodel.get(value)
else:
self.data = None
def process_formdata(self, valuelist):
"""
Process data received over the wire from a form.
This will be called during form construction with data supplied
through the `formdata` argument.
Converting primary key to ORM for server processing.
:param valuelist: A list of strings to process.
"""
if valuelist:
if self.is_related:
self.data = self.datamodel.get_related_interface(self.col_name).get(
valuelist[0]
)
else:
self.data = self.datamodel.get(valuelist[0])
class QuerySelectField(SelectFieldBase):
"""
Based on WTForms QuerySelectField
"""
widget = widgets.Select()
def __init__(
self,
label=None,
validators=None,
query_func=None,
get_pk_func=None,
get_label=None,
allow_blank=False,
blank_text="",
**kwargs
):
super(QuerySelectField, self).__init__(label, validators, **kwargs)
self.query_func = query_func
self.get_pk_func = get_pk_func
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, str):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._object_list = None
def _get_data(self):
if self._formdata is not None:
for pk, obj in self._get_object_list():
if pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _get_object_list(self):
if self._object_list is None:
objs = self.query_func()
self._object_list = list((str(self.get_pk_func(obj)), obj) for obj in objs)
return self._object_list
def iter_choices(self):
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == "__None":
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
data = self.data
if data is not None:
for pk, obj in self._get_object_list():
if data == obj:
break
else:
raise ValidationError(self.gettext("Not a valid choice"))
elif self._formdata or not self.allow_blank:
raise ValidationError(self.gettext("Not a valid choice"))
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(self, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(QuerySelectMultipleField, self).__init__(
label, validators, default=default, **kwargs
)
if kwargs.get("allow_blank", False):
import warnings
warnings.warn(
"allow_blank=True does not do anything for QuerySelectMultipleField."
)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._get_object_list():
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj in self.data)
def process_formdata(self, valuelist):
self._formdata = set(valuelist)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext("Not a valid choice"))
elif self.data:
obj_list = list(x[1] for x in self._get_object_list())
if not isinstance(self.data, list):
self.data = [self.data]
for v in self.data:
if v not in obj_list:
raise ValidationError(self.gettext("Not a valid choice"))
class EnumField(SelectField):
"""Selection field for Sqlalchemy Enum type.
The meaning of enum_class and enums is the same as for
attributes on sqlalchemy.types.Enum:
:param enum_class: either None or a subclass of Python enum.Enum
    :param enums: a sequence of strings; if enum_class is not None then it should be
`list(enum_class.__members__)`
"""
def __init__(
self, enum_class, enums, label=None, validators=None, default=None, **kwargs
):
self._enum_class = enum_class
self._enums = enums
# Column(Enum(enum.Enum)) case
if enum_class is not None:
labels = [
str(enum_class.__members__[enum_member].value) for enum_member in enums
]
def coerce(value):
if value is None:
return None
elif isinstance(value, enum_class):
return value
else:
return enum_class.__members__[value]
# Column(Enum(*enums)) case
else:
labels = enums
def coerce(value):
if value is None:
return None
return str(value)
choices = list(zip(enums, labels))
super(EnumField, self).__init__(
label=label,
validators=validators,
default=default,
coerce=coerce,
choices=choices,
**kwargs
)
def pre_validate(self, form):
for v, _ in self.choices:
if self.data == self.coerce(v):
break
else:
raise ValueError(self.gettext("Not a valid choice"))
|
88448369190ee1943d43fb81821286af37774e43
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/ranking/management/modules/quora.py
|
cbe7708d30e9a35ed5580a34a309053922c04d35
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
quora.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import csv
import arrow
from ranking.management.modules.common import REQ, BaseModule, parsed_table
class Statistic(BaseModule):
def get_standings(self, users=None, statistics=None):
        result = {}
problems_info = collections.OrderedDict()
def process_kv(row, k, v):
if k == 'Rank':
row['place'] = v
elif k == 'Username':
row['member'] = v
elif k == 'User':
if 'member' not in row:
row['member'] = v + ' ' + season
row['name'] = v
elif k == 'Last valid submission':
delta = arrow.get(v, ['YYYY-MM-DD H:mm:ss']) - self.start_time
row['penalty'] = self.to_time(delta, 3)
elif k in ['Global']:
row['solving'] = v
else:
problems = row.setdefault('problems', {})
if k not in problems_info:
problems_info[k] = {'short': k, 'full_score': 100}
if float(v) > 1e-9:
p = problems.setdefault(k, {})
p['result'] = v
p['partial'] = float(v) + 1e-9 < problems_info[k]['full_score']
filepath = self.info.get('standings_csv_filepath_')
if filepath:
season = self.get_season()
with open(filepath, 'r') as fo:
data = csv.DictReader(fo)
last, place = None, None
for idx, r in enumerate(data, start=1):
row = collections.OrderedDict()
for k, v in r.items():
process_kv(row, k, v)
score = (row['solving'], row['penalty'])
if last != score:
last = score
place = idx
row['place'] = place
result[row['member']] = row
elif self.standings_url:
page = REQ.get(self.standings_url)
problems_info = collections.OrderedDict()
table = parsed_table.ParsedTable(page)
for r in table:
row = collections.OrderedDict()
for k, v in r.items():
process_kv(row, k, v.value)
result[row['member']] = row
standings = {
'result': result,
'problems': list(problems_info.values()),
'hidden_fields': ['medal'],
}
return standings
|
1c30acf4ee575e5e9c154cd7c1335dd025a34c7e
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/torch_geometric/utils/to_dense_adj.py
|
e2423735a5ffa5441ac1a8149072b8a9d45c8b08
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
to_dense_adj.py
|
from typing import Optional
import torch
from torch import Tensor
from torch_geometric.typing import OptTensor
from torch_geometric.utils import scatter
def to_dense_adj(
edge_index: Tensor,
batch: OptTensor = None,
edge_attr: OptTensor = None,
max_num_nodes: Optional[int] = None,
batch_size: Optional[int] = None,
) -> Tensor:
r"""Converts batched sparse adjacency matrices given by edge indices and
edge attributes to a single dense batched adjacency matrix.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
edge_attr (Tensor, optional): Edge weights or multi-dimensional edge
features. (default: :obj:`None`)
max_num_nodes (int, optional): The size of the output node dimension.
(default: :obj:`None`)
        batch_size (int, optional): The batch size. (default: :obj:`None`)
:rtype: :class:`Tensor`
Examples:
>>> edge_index = torch.tensor([[0, 0, 1, 2, 3],
... [0, 1, 0, 3, 0]])
>>> batch = torch.tensor([0, 0, 1, 1])
>>> to_dense_adj(edge_index, batch)
tensor([[[1., 1.],
[1., 0.]],
[[0., 1.],
[1., 0.]]])
>>> to_dense_adj(edge_index, batch, max_num_nodes=4)
tensor([[[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]]])
>>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
>>> to_dense_adj(edge_index, batch, edge_attr)
tensor([[[1., 2.],
[3., 0.]],
[[0., 4.],
[5., 0.]]])
"""
if batch is None:
num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
batch = edge_index.new_zeros(num_nodes)
if batch_size is None:
batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1
one = batch.new_ones(batch.size(0))
num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')
cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)])
idx0 = batch[edge_index[0]]
idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]
idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]
if max_num_nodes is None:
max_num_nodes = int(num_nodes.max())
elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)
or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):
mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)
idx0 = idx0[mask]
idx1 = idx1[mask]
idx2 = idx2[mask]
edge_attr = None if edge_attr is None else edge_attr[mask]
if edge_attr is None:
edge_attr = torch.ones(idx0.numel(), device=edge_index.device)
size = [batch_size, max_num_nodes, max_num_nodes]
size += list(edge_attr.size())[1:]
flattened_size = batch_size * max_num_nodes * max_num_nodes
idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2
adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')
adj = adj.view(size)
return adj
|
494cbac7921933e8d333c55469dd9b18628551fa
|
74218ee28b26a2b0c28c69c01eabb8b81b9d1f1d
|
/tests/test_integration.py
|
1dc99062ce39b4fef8b45fdb8ff7503cad3cbc41
|
[
"MIT"
] |
permissive
|
readthedocs/sphinx-autoapi
|
5f2cd9f996693b14325e9452165731b6fb0a89e6
|
92437c9af72b75fbccb451da95f85dbbf0c3f0da
|
refs/heads/main
| 2023-09-02T19:45:48.149162
| 2023-07-08T17:38:24
| 2023-07-08T17:38:24
| 36,524,868
| 277
| 129
|
MIT
| 2023-08-21T12:48:43
| 2015-05-29T19:32:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
test_integration.py
|
import io
import os
import shutil
from contextlib import contextmanager
from sphinx.application import Sphinx
@contextmanager
def sphinx_build(test_dir, confoverrides=None):
os.chdir("tests/{0}".format(test_dir))
try:
app = Sphinx(
srcdir=".",
confdir=".",
outdir="_build/text",
doctreedir="_build/.doctrees",
buildername="text",
confoverrides=confoverrides,
)
app.build(force_all=True)
yield
finally:
if os.path.exists("_build"):
shutil.rmtree("_build")
os.chdir("../..")
class LanguageIntegrationTests:
def _run_test(self, test_dir, test_file, test_string):
with sphinx_build(test_dir):
with io.open(test_file, encoding="utf8") as fin:
text = fin.read().strip()
assert test_string in text
class TestIntegration(LanguageIntegrationTests):
def test_template_overrides(self):
self._run_test(
"templateexample",
"_build/text/autoapi/example/index.txt",
"This is a function template override",
)
class TestTOCTree(LanguageIntegrationTests):
def test_toctree_overrides(self):
self._run_test("toctreeexample", "_build/text/index.txt", "API Reference")
def test_toctree_domain_insertion(self):
"""
Test that the example_function gets added to the TOC Tree
"""
self._run_test(
"toctreeexample", "_build/text/index.txt", '* "example_function()"'
)
|
90f66304dd53e830d1e2aa1c75483ab06717a559
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltPixelLayerPairsUnseeded_cfi.py
|
4f4ff5cf247062ed05069ec4b4b38ace5ef77d8d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
hltPixelLayerPairsUnseeded_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltPixelLayerPairsUnseeded = cms.EDProducer("SeedingLayersEDProducer",
BPix = cms.PSet(
HitProducer = cms.string('siPixelRecHits'),
TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelTriplets'),
skipClusters = cms.InputTag("hltElePixelHitTripletsClusterRemoverUnseeded")
),
FPix = cms.PSet(
HitProducer = cms.string('siPixelRecHits'),
TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelTriplets'),
skipClusters = cms.InputTag("hltElePixelHitTripletsClusterRemoverUnseeded")
),
MTEC = cms.PSet(
),
MTIB = cms.PSet(
),
MTID = cms.PSet(
),
MTOB = cms.PSet(
),
TEC = cms.PSet(
),
TIB = cms.PSet(
),
TID = cms.PSet(
),
TOB = cms.PSet(
),
layerList = cms.vstring(
'BPix1+BPix2',
'BPix1+BPix3',
'BPix1+BPix4',
'BPix2+BPix3',
'BPix2+BPix4',
'BPix3+BPix4',
'FPix1_pos+FPix2_pos',
'FPix1_pos+FPix3_pos',
'FPix2_pos+FPix3_pos',
'BPix1+FPix1_pos',
'BPix1+FPix2_pos',
'BPix1+FPix3_pos',
'BPix2+FPix1_pos',
'BPix2+FPix2_pos',
'BPix2+FPix3_pos',
'BPix3+FPix1_pos',
'BPix3+FPix2_pos',
'BPix3+FPix3_pos',
'BPix4+FPix1_pos',
'BPix4+FPix2_pos',
'BPix4+FPix3_pos',
'FPix1_neg+FPix2_neg',
'FPix1_neg+FPix3_neg',
'FPix2_neg+FPix3_neg',
'BPix1+FPix1_neg',
'BPix1+FPix2_neg',
'BPix1+FPix3_neg',
'BPix2+FPix1_neg',
'BPix2+FPix2_neg',
'BPix2+FPix3_neg',
'BPix3+FPix1_neg',
'BPix3+FPix2_neg',
'BPix3+FPix3_neg',
'BPix4+FPix1_neg',
'BPix4+FPix2_neg',
'BPix4+FPix3_neg'
)
)
|
7b9945b4ab496d7ce2d877f2e00d402818b33b24
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsurlconnection.py
|
e138383b474ea047b0444e919e147efd47a93e38
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,750
|
py
|
test_nsurlconnection.py
|
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level, min_sdk_level
import objc
class TestNSURLConnectionHelper(Foundation.NSObject):
def connection_canAuthenticateAgainstProtectionSpace_(self, a, b):
return 1
def connectionShouldUseCredentialStorage_(self, a):
return 1
def connection_didSendBodyData_totalBytesWritten_totalBytesExpectedToWrite_(
self, a, b, c, d
):
return 1
def connection_didWriteData_totalBytesWritten_expectedTotalBytes_(self, a, b, c, d):
return 1
def connectionDidResumeDownloading_totalBytesWritten_expectedTotalBytes_(
self, a, b, c
):
return 1
class TestNSURLConnection(TestCase):
def testMethods(self):
self.assertResultIsBOOL(Foundation.NSURLConnection.canHandleRequest_)
self.assertArgIsBOOL(
Foundation.NSURLConnection.initWithRequest_delegate_startImmediately_, 2
)
self.assertArgIsOut(
Foundation.NSURLConnection.sendSynchronousRequest_returningResponse_error_,
1,
)
self.assertArgIsOut(
Foundation.NSURLConnection.sendSynchronousRequest_returningResponse_error_,
2,
)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertResultIsBOOL(
TestNSURLConnectionHelper.connection_canAuthenticateAgainstProtectionSpace_
)
self.assertResultIsBOOL(
TestNSURLConnectionHelper.connectionShouldUseCredentialStorage_
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didSendBodyData_totalBytesWritten_totalBytesExpectedToWrite_, # noqa: B950
1,
objc._C_NSInteger,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didSendBodyData_totalBytesWritten_totalBytesExpectedToWrite_, # noqa: B950
2,
objc._C_NSInteger,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didSendBodyData_totalBytesWritten_totalBytesExpectedToWrite_, # noqa: B950
3,
objc._C_NSInteger,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didWriteData_totalBytesWritten_expectedTotalBytes_, # noqa: B950
1,
objc._C_LNG_LNG,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didWriteData_totalBytesWritten_expectedTotalBytes_, # noqa: B950
2,
objc._C_LNG_LNG,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connection_didWriteData_totalBytesWritten_expectedTotalBytes_, # noqa: B950
3,
objc._C_LNG_LNG,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connectionDidResumeDownloading_totalBytesWritten_expectedTotalBytes_, # noqa: B950
1,
objc._C_LNG_LNG,
)
self.assertArgHasType(
TestNSURLConnectionHelper.connectionDidResumeDownloading_totalBytesWritten_expectedTotalBytes_, # noqa: B950
2,
objc._C_LNG_LNG,
)
@min_os_level("10.7")
def testMethods10_7(self):
self.assertArgIsBlock(
Foundation.NSURLConnection.sendAsynchronousRequest_queue_completionHandler_,
2,
b"v@@",
)
@min_sdk_level("10.7")
def testProtocolObjects(self):
self.assertProtocolExists("NSURLConnectionDelegate")
@min_sdk_level("10.10")
def testProtocolObjects10_10(self):
self.assertProtocolExists("NSURLConnectionDataDelegate")
self.assertProtocolExists("NSURLConnectionDownloadDelegate")
|
78868b645e523dea3d882ea6da3da1d2b2083378
|
ad89d921c37ab2a6cc952181a4a15c41c0637270
|
/controller/urls.py
|
6053190d77fe2b7e685726e362f985ce503bb64e
|
[] |
no_license
|
vulhub/MetaDockers
|
f3b693e3dab73de482a04f7e63307c70a106343e
|
da82440fdfc90e4c92fc553a0cdb278cf9d7798d
|
refs/heads/master
| 2023-05-20T01:59:04.449519
| 2020-03-03T09:30:03
| 2020-03-03T09:30:03
| 103,743,717
| 131
| 38
| null | 2021-06-10T18:39:13
| 2017-09-16T10:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 513
|
py
|
urls.py
|
from django.conf.urls import url, handler404, handler500
from . import views
urlpatterns = [
url(r'^info/$', views.infoRouting),
url(r'^images', views.imageRouting),
url(r'^index/$', views.indexRouting),
url(r'^about/$', views.aboutRouting),
url(r'^vulhubs/$', views.vulhubRouting),
url(r'^volumes/$', views.volumeRouting),
url(r'^network', views.networkRouting),
url(r'^container', views.containerRouting),
url(r'test', views.testRouting)
]
handler404 = views.not_found
handler500 = views.server_error
|
11c2f8cd3a1e029fdedcdecc0822e9f578df02c4
|
045ec3ae16fc554a05510abc3697557ebc5ce304
|
/CIME/case/__init__.py
|
db92711a2e60eba6da1bc33b5e8e06122af0dcac
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ESMCI/cime
|
c09223ee9b8a463bd00741ff39f60fda7639af89
|
02fad90a379cdbd3c1106cbd63324480f0bf7a22
|
refs/heads/master
| 2023-08-16T07:03:22.224344
| 2023-08-03T19:47:53
| 2023-08-03T19:47:53
| 31,605,662
| 159
| 179
|
NOASSERTION
| 2023-09-12T18:38:42
| 2015-03-03T15:33:00
|
Python
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
__init__.py
|
from CIME.case.case import Case
|
a916fb0252d3ce3862ded03ccf394baa9632f365
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/securityinsights/v20230701preview/get_aws_s3_data_connector.py
|
63ab84c51bfe72b23f41e0c39d3b2265ab0a0833
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,637
|
py
|
get_aws_s3_data_connector.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAwsS3DataConnectorResult',
'AwaitableGetAwsS3DataConnectorResult',
'get_aws_s3_data_connector',
'get_aws_s3_data_connector_output',
]
@pulumi.output_type
class GetAwsS3DataConnectorResult:
"""
Represents Amazon Web Services S3 data connector.
"""
def __init__(__self__, data_types=None, destination_table=None, etag=None, id=None, kind=None, name=None, role_arn=None, sqs_urls=None, system_data=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if destination_table and not isinstance(destination_table, str):
raise TypeError("Expected argument 'destination_table' to be a str")
pulumi.set(__self__, "destination_table", destination_table)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if role_arn and not isinstance(role_arn, str):
raise TypeError("Expected argument 'role_arn' to be a str")
pulumi.set(__self__, "role_arn", role_arn)
if sqs_urls and not isinstance(sqs_urls, list):
raise TypeError("Expected argument 'sqs_urls' to be a list")
pulumi.set(__self__, "sqs_urls", sqs_urls)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.AwsS3DataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter(name="destinationTable")
def destination_table(self) -> str:
"""
The logs destination table name in LogAnalytics.
"""
return pulumi.get(self, "destination_table")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the data connector
Expected value is 'AmazonWebServicesS3'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The Aws Role Arn that is used to access the Aws account.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="sqsUrls")
def sqs_urls(self) -> Sequence[str]:
"""
The AWS sqs urls for the connector.
"""
return pulumi.get(self, "sqs_urls")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetAwsS3DataConnectorResult(GetAwsS3DataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAwsS3DataConnectorResult(
data_types=self.data_types,
destination_table=self.destination_table,
etag=self.etag,
id=self.id,
kind=self.kind,
name=self.name,
role_arn=self.role_arn,
sqs_urls=self.sqs_urls,
system_data=self.system_data,
type=self.type)
def get_aws_s3_data_connector(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAwsS3DataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230701preview:getAwsS3DataConnector', __args__, opts=opts, typ=GetAwsS3DataConnectorResult).value
return AwaitableGetAwsS3DataConnectorResult(
data_types=pulumi.get(__ret__, 'data_types'),
destination_table=pulumi.get(__ret__, 'destination_table'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
role_arn=pulumi.get(__ret__, 'role_arn'),
sqs_urls=pulumi.get(__ret__, 'sqs_urls'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_aws_s3_data_connector)
def get_aws_s3_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAwsS3DataConnectorResult]:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
...
|
6b1e267d8c7648dd5733ed924a13eff50729b0e1
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-security/ranger/intg/src/test/python/test_ranger_client.py
|
cbedcbf763199e75d82804e67f71ae829c135b03
|
[
"Apache-2.0",
"BSD-3-Clause",
"WTFPL",
"MIT",
"GPL-2.0-only"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 4,386
|
py
|
test_ranger_client.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from apache_ranger.exceptions import RangerServiceException
from apache_ranger.model.ranger_service import RangerService
try:
from apache_ranger.client.ranger_client import API, HttpMethod, HTTPStatus, RangerClient
except ModuleNotFoundError: # requests not installed
exit() # skipping unit tests
class MockResponse:
def __init__(self, status_code, response=None, content=None):
self.status_code = status_code
self.response = response
self.content = content
return
def json(self):
return self.response
def text(self):
return str(self.content)
class TestRangerClient(unittest.TestCase):
URL = "url"
AUTH = ("user", "password")
@patch('apache_ranger.client.ranger_client.Session')
def test_get_service_unavailable(self, mock_session):
mock_session.return_value.get.return_value = MockResponse(HTTPStatus.SERVICE_UNAVAILABLE)
result = RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
self.assertTrue(result is None)
@patch('apache_ranger.client.ranger_client.Session')
def test_get_success(self, mock_session):
response = [ RangerService() ]
mock_session.return_value.get.return_value = MockResponse(HTTPStatus.OK, response=response, content='Success')
result = RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
self.assertEqual(response, result)
@patch('apache_ranger.client.ranger_client.Session')
@patch('apache_ranger.client.ranger_client.Response')
def test_get_unexpected_status_code(self, mock_response, mock_session):
content = 'Internal Server Error'
mock_response.text = content
mock_response.content = content
mock_response.status_code = HTTPStatus.INTERNAL_SERVER_ERROR
mock_session.return_value.get.return_value = mock_response
try:
RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
except RangerServiceException as e:
            self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, e.statusCode)
@patch('apache_ranger.client.ranger_client.RangerClient.FIND_SERVICES')
def test_unexpected_http_method(self, mock_api):
mock_api.method.return_value = "PATCH"
mock_api.url = TestRangerClient.URL
mock_api.path = RangerClient.URI_SERVICE
try:
RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
except RangerServiceException as e:
self.assertTrue('Unsupported HTTP Method' in repr(e))
def test_url_missing_format(self):
params = {'arg1': 1, 'arg2': 2}
try:
API("{arg1}test{arg2}path{arg3}", HttpMethod.GET, HTTPStatus.OK).format_path(params)
self.fail("Supposed to fail")
except KeyError as e:
self.assertTrue('KeyError' in repr(e))
def test_url_invalid_format(self):
params = {'1', '2'}
try:
API("{}test{}path{}", HttpMethod.GET, HTTPStatus.OK).format_path(params)
self.fail("Supposed to fail")
except TypeError as e:
self.assertTrue('TypeError' in repr(e))
if __name__ == '__main__':
unittest.main()
|
946b4b6fddeb01392fbc589695ed58933b275558
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoMET/METFilters/python/badGlobalMuonTaggersAOD_cff.py
|
b2d5b1619298d89a3254a407fc309db9f11bfe72
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
badGlobalMuonTaggersAOD_cff.py
|
import FWCore.ParameterSet.Config as cms
badGlobalMuonTagger = cms.EDFilter("BadGlobalMuonTagger",
muons = cms.InputTag("muons"),
vtx = cms.InputTag("offlinePrimaryVertices"),
muonPtCut = cms.double(20),
selectClones = cms.bool(False),
taggingMode = cms.bool(False),
)
cloneGlobalMuonTagger = badGlobalMuonTagger.clone(
selectClones = True
)
noBadGlobalMuons = cms.Sequence(~cloneGlobalMuonTagger + ~badGlobalMuonTagger)
|
e55d8bde30d28ba712b65a023f8e56194159a230
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/src/garage/tf/q_functions/discrete_mlp_dueling_q_function.py
|
dd7e142ae67d97cb8a247e9f4404933dde01cfad
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,015
|
py
|
discrete_mlp_dueling_q_function.py
|
"""Discrete MLP QFunction."""
import tensorflow as tf
from garage.tf.models import MLPDuelingModel
class DiscreteMLPDuelingQFunction(MLPDuelingModel):
"""Discrete Q Function with dualing MLP network.
This class implements a Q-value network. It predicts Q-value based on the
input state and action. It uses an MLP to fit the function Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of the q-function, also serves as the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization.
"""
def __init__(self,
env_spec,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
self._env_spec = env_spec
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self.obs_dim = env_spec.observation_space.shape
action_dim = env_spec.action_space.flat_dim
super().__init__(name=name,
output_dim=action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._network = None
self._initialize()
def _initialize(self):
"""Initialize QFunction."""
obs_ph = tf.compat.v1.placeholder(tf.float32, (None, ) + self.obs_dim,
name='obs')
self._network = super().build(obs_ph)
@property
def q_vals(self):
"""Return the Q values, the output of the network.
Return:
list[tf.Tensor]: Q values.
"""
return self._network.outputs
@property
def input(self):
"""Get input.
Return:
tf.Tensor: QFunction Input.
"""
return self._network.input
# pylint: disable=arguments-differ
def build(self, state_input, name):
"""Build the symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
tf.Tensor: The tf.Tensor output of Discrete MLP QFunction.
"""
return super().build(state_input, name=name).outputs
def clone(self, name):
"""Return a clone of the Q-function.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created q-function.
Returns:
garage.tf.q_functions.DiscreteMLPQFunction: Clone of this object
"""
new_qf = self.__class__(name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_qf.parameters = self.parameters
return new_qf
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state.
"""
new_dict = super().__getstate__()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
|
6a838fdf87885bd04e2fac349dcc9a0ff4650e3a
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/third_party/incubator-tvm/tests/python/integration/test_reduce.py
|
acbec36c510eb370e1de86ab3751a71071edd876
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 12,105
|
py
|
test_reduce.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
def test_reduce_prims():
def test_prim(reducer, np_reducer):
# graph
n = tvm.var('n')
m = tvm.var('m')
A = tvm.placeholder((n, m), name='A')
R = tvm.compute((n, ), lambda i: tvm.expr.Select((i > 1), 1, 0), name='R')
k = tvm.reduce_axis((0, m))
B = tvm.compute((n,), lambda i: reducer(A[i, k], axis=k, where=(R[i]==1)), name='B')
# schedule
s = tvm.create_schedule(B.op)
# create iter var and assign them tags.
num_thread = 1
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
s[R].compute_inline()
# one line to build the function.
def check_device(device, host="llvm"):
ctx = tvm.context(device, 0)
if not tvm.module.enabled(host):
return
if not ctx.exist:
print("skip because %s is not enabled.." % device)
return
freduce = tvm.build(s,
args=[A, B],
target=device, target_host=host,
name="myreduce")
# launch the kernel.
n = 1028
m = 129
x = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), ctx)
y = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)
freduce(x, y)
npy = y.asnumpy()
npy[:2] = 0
res = np_reducer(x.asnumpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(npy, res, rtol=1e-4)
check_device("metal")
check_device("vulkan")
check_device("cuda")
check_device("opencl")
test_prim(tvm.sum, np.sum)
test_prim(tvm.min, np.amin)
test_prim(tvm.max, np.amax)
def test_rfactor():
n = tvm.convert(1027)
A = tvm.placeholder((n,), name='A')
k = tvm.reduce_axis((0, n))
B = tvm.compute((1,), lambda i: tvm.sum(A[k], axis=k), name='B')
# schedule
s = tvm.create_schedule(B.op)
kf, ki = s[B].split(k, nparts=4)
BF = s.rfactor(B, kf)
s[BF].parallel(BF.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
if not tvm.module.enabled(target):
return
ctx = tvm.cpu(0)
fapi = tvm.lower(s, args=[A, B])
fsum = tvm.build(fapi,
target=target,
name="mysum")
# launch the kernel.
n = 1027
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1, dtype=B.dtype), ctx)
fsum(a, b)
res = np.sum(a.asnumpy(), axis=0)
tvm.testing.assert_allclose(
b.asnumpy(), res, rtol=1e-4)
check_target()
def test_rfactor_factor_axis():
n = tvm.convert(1027)
A = tvm.placeholder((n,), name='A')
k = tvm.reduce_axis((0, n))
B = tvm.compute((1,), lambda i: tvm.sum(A[k], axis=k), name='B')
# schedule
s = tvm.create_schedule(B.op)
kf, ki = s[B].split(k, nparts=4)
BF = s.rfactor(B, kf, 1)
s[BF].parallel(BF.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
if not tvm.module.enabled(target):
return
ctx = tvm.cpu(0)
fapi = tvm.lower(s, args=[A, B])
fsum = tvm.build(fapi,
target=target,
name="mysum")
# launch the kernel.
n = 1027
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1, dtype=B.dtype), ctx)
fsum(a, b)
res = np.sum(a.asnumpy(), axis=0)
tvm.testing.assert_allclose(
b.asnumpy(), res, rtol=1e-4)
check_target()
def test_rfactor_threads():
nn = 1027
mm = 10
n = tvm.convert(nn)
m = tvm.convert(mm)
A = tvm.placeholder((m, n), name='A')
k = tvm.reduce_axis((0, n))
nthread = 16
B = tvm.compute((m,), lambda i: tvm.sum(A[i, k], axis=k, where=(i>1)), name='B')
# schedule
s = tvm.create_schedule(B.op)
ko, kf = s[B].split(k, factor=nthread)
BF = s.rfactor(B, kf)
bx, ty = s[B].split(s[B].op.axis[0], factor=nthread)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(ty, tvm.thread_axis("threadIdx.y"))
tx = s[B].op.reduce_axis[0]
thread_x = tvm.thread_axis("threadIdx.x")
s[B].bind(tx, thread_x)
s[BF].compute_at(s[B], tx)
s[B].set_store_predicate(thread_x.var.equal(0))
# one line to build the function.
def check_target(device, host="stackvm"):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(s, args=[A, B])
fsum = tvm.build(fapi,
target=device,
name="mysum")
# launch the kernel.
n = nn
m = mm
a = tvm.nd.array(np.random.uniform(size=(m, n)).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(m, dtype=B.dtype), ctx)
fsum(a, b)
res = np.sum(a.asnumpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(
b.asnumpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
def test_rfactor_elemwise_threads():
n = 1025
m = 10
A = tvm.placeholder((m, n), name='A')
k = tvm.reduce_axis((0, n))
nthread = 16
B = tvm.compute((m,), lambda i: tvm.sum(A[i, k], axis=k), name='B')
BB = tvm.compute((m,), lambda i: B[i] + 1, name='BB')
C = tvm.compute((m,), lambda i: BB[i] + 1, name='C')
# schedule
s = tvm.create_schedule(C.op)
s[BB].compute_inline()
bx, ty = s[C].split(s[C].op.axis[0], factor=nthread)
ko, kf = s[B].split(k, factor=nthread)
BF = s.rfactor(B, kf)
s[B].compute_at(s[C], ty)
s[C].bind(bx, tvm.thread_axis("blockIdx.x"))
s[C].bind(ty, tvm.thread_axis("threadIdx.y"))
tx = s[B].op.reduce_axis[0]
thread_x = tvm.thread_axis("threadIdx.x")
s[B].bind(tx, thread_x)
s[BF].compute_at(s[B], tx)
# Since thread_x is shared across reductions
# only one of them need to do write back
s[B].set_store_predicate(thread_x.var.equal(0))
s[C].set_store_predicate(thread_x.var.equal(0))
# one line to build the function.
def check_target(device, host="stackvm"):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(s, args=[A, C])
fsum = tvm.build(fapi,
target=device,
name="mysum")
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=(m, n)).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(m, dtype=B.dtype), ctx)
fsum(a, b)
res = np.sum(a.asnumpy(), axis=1) + 2
tvm.testing.assert_allclose(
b.asnumpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
def test_argmax():
def fcombine(x, y):
lhs = tvm.make.Select((x[1] >= y[1]), x[0], y[0])
rhs = tvm.make.Select((x[1] >= y[1]), x[1], y[1])
return lhs, rhs
def fidentity(t0, t1):
return tvm.const(-1, t0), tvm.min_value(t1)
argmax = tvm.comm_reducer(fcombine,
fidentity,
name='argmax')
m = tvm.var('m')
n = tvm.var('n')
idx = tvm.placeholder((m, n), name='idx', dtype='int32')
val = tvm.placeholder((m, n), name='val', dtype='float32')
k = tvm.reduce_axis((0, n), 'k')
T0, T1 = tvm.compute((m,), lambda i: argmax((idx[i,k], val[i,k]), axis=k), name='T')
s = tvm.create_schedule(T0.op)
def check_target():
device = 'cpu'
if not tvm.module.enabled(device):
print("skip because %s is not enabled.." % device)
return
ctx = tvm.context(device, 0)
fapi = tvm.lower(s, args=[idx, val, T0, T1])
fargmax = tvm.build(fapi,
target='llvm',
name="argmax")
mm = 12
nn = 16
np_idx = np.repeat(np.arange(nn, dtype='int32').reshape(1, nn), mm, axis=0)
np_val = np.random.uniform(size=(mm, nn)).astype('float32')
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, ctx)
nd_val = tvm.nd.array(np_val, ctx)
nd_res0 = tvm.nd.array(np.zeros(mm, dtype='int32'), ctx)
nd_res1 = tvm.nd.array(np.zeros(mm, dtype='float32'), ctx)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.asnumpy())
check_target()
def test_rfactor_argmax():
def fcombine(x, y):
lhs = tvm.make.Select((x[1] >= y[1]), x[0], y[0])
rhs = tvm.make.Select((x[1] >= y[1]), x[1], y[1])
return lhs, rhs
def fidentity(t0, t1):
return tvm.const(-1, t0), tvm.min_value(t1)
argmax = tvm.comm_reducer(fcombine,
fidentity,
name='argmax')
nn = 1027
mm = 10
n = tvm.convert(nn)
m = tvm.convert(mm)
A0 = tvm.placeholder((m, n), name='A0', dtype='int32')
A1 = tvm.placeholder((m, n), name='A1', dtype='float32')
k = tvm.reduce_axis((0, n))
B0, B1 = tvm.compute((m,), lambda i: argmax((A0[i, k], A1[i, k]), axis=k), name='B')
# schedule
s = tvm.create_schedule(B0.op)
nthread = 16
ko, kf = s[B0].split(k, factor=nthread)
BF0, BF1 = s.rfactor(B0, kf)
bx, ty = s[B0].split(s[B0].op.axis[0], factor=nthread)
s[B0].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B0].bind(ty, tvm.thread_axis("threadIdx.y"))
tx = s[B0].op.reduce_axis[0]
thread_x = tvm.thread_axis("threadIdx.x")
s[B0].bind(tx, thread_x)
s[BF0.op].compute_at(s[B0], tx)
s[B0].set_store_predicate(thread_x.var.equal(0))
def check_target(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(s, args=[A0, A1, B0, B1])
fargmax = tvm.build(fapi,
target=device,
name="argmax")
np_idx = np.repeat(np.arange(nn, dtype='int32').reshape(1, nn), mm, axis=0)
np_val = np.random.uniform(size=(mm, nn)).astype('float32')
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, ctx)
nd_val = tvm.nd.array(np_val, ctx)
nd_res0 = tvm.nd.array(np.zeros(mm, dtype='int32'), ctx)
nd_res1 = tvm.nd.array(np.zeros(mm, dtype='float32'), ctx)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.asnumpy())
check_target("cuda")
check_target("vulkan")
if __name__ == "__main__":
test_rfactor_elemwise_threads()
test_rfactor_threads()
test_rfactor_factor_axis()
test_rfactor()
test_reduce_prims()
test_argmax()
test_rfactor_argmax()
|
25e87b62adcfbd7ce6da0654824bf49f7c84b3bd
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoJets/FFTJetProducers/python/pileup_shape_Fall10_L1Hybrid_AK5PF_v1_cfi.py
|
7c4a13f99b5614f60d082d8d1dda3de400711886
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,116
|
py
|
pileup_shape_Fall10_L1Hybrid_AK5PF_v1_cfi.py
|
fftjet_pileup_eta_max = 5.0
fftjet_pileup_eta_bins = 256
fftjet_pileup_magnitude_factors = (0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.934901415155, 0.921793214113, 0.894093438628, 0.86863684533, 0.878324960935, 0.88801307654, 0.887280404372, 0.87612694443, 0.864973484489, 0.864578799665, 0.86487081544, 0.872577489585, 0.892381826907, 0.91218616423, 0.914413976405, 0.912783558872, 0.917217872946, 0.93891356524, 0.960609257533, 0.952614509173, 0.931895286247, 0.913135298357, 0.906410367365, 0.899685436372, 0.891192072077, 0.881418111732, 0.87182778515, 0.871234952613, 0.870642120077, 0.880467469405, 0.902522978519, 0.924578487632, 0.931377409681, 0.93648115539, 0.951804998157, 0.986967637021, 1.02213027588, 1.03986996453, 1.05269545276, 1.06847813777, 1.09474535492, 1.12101257207, 1.13736979394, 1.14862181119, 1.15767335789, 1.14692066972, 1.13616798155, 1.12465912311, 1.11250612595, 1.10035312879, 1.09060053898, 1.08089694012, 1.07155493564, 1.06271227328, 1.05386961091, 1.04534049682, 1.03686242651, 1.02824848542, 1.0193175125, 1.01038653959, 1.00343919752, 0.997188797984, 0.991040679533, 0.985358512105, 0.979676344677, 0.973871411656, 0.967991235596, 0.962142838815, 0.956792338955, 0.951441839095, 0.946508874826, 0.941993446149, 0.937478017472, 0.934011818838, 0.930612589436, 0.927488843552, 0.924814573105, 0.922140302659, 0.921115762562, 0.920453359144, 0.919535372435, 0.917889958669, 0.916244544902, 0.913704053521, 0.910779957448, 0.907773888308, 0.90426426778, 0.900754647251, 0.896035134324, 0.890439493516, 0.884847362278, 0.879427202654, 0.87400704303, 0.869594609611, 0.866365156221, 0.863135702831, 0.860475442699, 0.857878426261, 0.855583256346, 0.853874023397, 0.852164790448, 0.851788759232, 0.851788759232, 0.852164790448, 0.853874023397, 0.855583256346, 0.857878426261, 0.860475442699, 0.863135702831, 0.866365156221, 0.869594609611, 0.87400704303, 0.879427202654, 0.884847362278, 0.890439493516, 0.896035134324, 0.900754647251, 0.90426426778, 0.907773888308, 0.910779957448, 0.913704053521, 0.916244544902, 0.917889958669, 0.919535372435, 0.920453359144, 0.921115762562, 0.922140302659, 0.924814573105, 0.927488843552, 0.930612589436, 0.934011818838, 0.937478017472, 0.941993446149, 0.946508874826, 0.951441839095, 0.956792338955, 0.962142838815, 0.967991235596, 0.973871411656, 0.979676344677, 0.985358512105, 0.991040679533, 0.997188797984, 1.00343919752, 1.01038653959, 1.0193175125, 1.02824848542, 1.03686242651, 1.04534049682, 1.05386961091, 1.06271227328, 1.07155493564, 1.08089694012, 1.09060053898, 1.10035312879, 1.11250612595, 1.12465912311, 1.13616798155, 1.14692066972, 1.15767335789, 1.14862181119, 1.13736979394, 1.12101257207, 1.09474535492, 1.06847813777, 1.05269545276, 1.03986996453, 1.02213027588, 0.986967637021, 0.951804998157, 0.93648115539, 0.931377409681, 0.924578487632, 0.902522978519, 0.880467469405, 0.870642120077, 0.871234952613, 0.87182778515, 0.881418111732, 0.891192072077, 0.899685436372, 0.906410367365, 0.913135298357, 0.931895286247, 0.952614509173, 0.960609257533, 0.93891356524, 0.917217872946, 0.912783558872, 0.914413976405, 0.91218616423, 0.892381826907, 0.872577489585, 0.86487081544, 0.864578799665, 0.864973484489, 0.87612694443, 0.887280404372, 
0.88801307654, 0.878324960935, 0.86863684533, 0.894093438628, 0.921793214113, 0.934901415155, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973, 0.925272109973)
|
115f101033bf00610b8a986a1b1705b86ba5ec20
|
8880226d2ca1c9448c44b3e9f21226a58e61ac93
|
/awacs/servicequotas.py
|
7c38e2f865c572db14202fded34108825ede40c6
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
cloudtools/awacs
|
2f82958ccc7ba2177492c29c706a5737f19dd2d1
|
c449a9637f01c26e73b827a9f8d5cc7715bbbea2
|
refs/heads/main
| 2023-08-31T00:58:28.636568
| 2023-08-28T05:13:01
| 2023-08-28T05:13:01
| 9,062,692
| 385
| 107
|
BSD-2-Clause
| 2023-08-13T23:21:39
| 2013-03-27T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
servicequotas.py
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Service Quotas"
prefix = "servicequotas"
class Action(BaseAction):
def __init__(self, action: Optional[str] = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
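# Usage sketch (illustrative only; every name below is defined in this module):
# actions = [ListServiceQuotas, GetServiceQuota]
# resource = ARN(resource="*", region="us-east-1", account="123456789012")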
AssociateServiceQuotaTemplate = Action("AssociateServiceQuotaTemplate")
DeleteServiceQuotaIncreaseRequestFromTemplate = Action(
"DeleteServiceQuotaIncreaseRequestFromTemplate"
)
DisassociateServiceQuotaTemplate = Action("DisassociateServiceQuotaTemplate")
GetAWSDefaultServiceQuota = Action("GetAWSDefaultServiceQuota")
GetAssociationForServiceQuotaTemplate = Action("GetAssociationForServiceQuotaTemplate")
GetRequestedServiceQuotaChange = Action("GetRequestedServiceQuotaChange")
GetServiceQuota = Action("GetServiceQuota")
GetServiceQuotaIncreaseRequestFromTemplate = Action(
"GetServiceQuotaIncreaseRequestFromTemplate"
)
ListAWSDefaultServiceQuotas = Action("ListAWSDefaultServiceQuotas")
ListRequestedServiceQuotaChangeHistory = Action(
"ListRequestedServiceQuotaChangeHistory"
)
ListRequestedServiceQuotaChangeHistoryByQuota = Action(
"ListRequestedServiceQuotaChangeHistoryByQuota"
)
ListServiceQuotaIncreaseRequestsInTemplate = Action(
"ListServiceQuotaIncreaseRequestsInTemplate"
)
ListServiceQuotas = Action("ListServiceQuotas")
ListServices = Action("ListServices")
ListTagsForResource = Action("ListTagsForResource")
PutServiceQuotaIncreaseRequestIntoTemplate = Action(
"PutServiceQuotaIncreaseRequestIntoTemplate"
)
RequestServiceQuotaIncrease = Action("RequestServiceQuotaIncrease")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
|
b44d9e8822ed036a7869f67891b646d67405bb37
|
8a85eb9b50864626cd2674f15b07df3d5dbe0b73
|
/neo/io/openephysbinaryio.py
|
aad629fa9cc94b735b647283f1a2a7e25644e58b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NeuralEnsemble/python-neo
|
287d3457a44c45f4dcbee0e9f9a2a5d83142de69
|
354c8d9d5fbc4daad3547773d2f281f8c163d208
|
refs/heads/master
| 2023-09-06T03:29:34.835053
| 2023-09-01T09:17:14
| 2023-09-01T09:17:14
| 3,949,530
| 265
| 213
|
BSD-3-Clause
| 2023-09-14T19:09:24
| 2012-04-06T12:48:48
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
openephysbinaryio.py
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.openephysbinaryrawio import OpenEphysBinaryRawIO
class OpenEphysBinaryIO(OpenEphysBinaryRawIO, BaseFromRaw):
_prefered_signal_group_mode = 'group-by-same-units'
mode = 'dir'
def __init__(self, dirname):
OpenEphysBinaryRawIO.__init__(self, dirname=dirname)
BaseFromRaw.__init__(self, dirname)
|
17ed59bd3f2380f4b23869e432495c434ed61260
|
6a63f3baf8a5a54bba1974acd3a4dc14f486eb88
|
/pyexcel/internal/meta.py
|
eeac18b963262567f4ae759308f8e8924c7a83b2
|
[
"BSD-3-Clause"
] |
permissive
|
pyexcel/pyexcel
|
7ecda49447a7576119441a9a72fb5ecbd46f9ec0
|
f1bf5fe0f9119b64c9a87ad57d9fc771cbc1a52d
|
refs/heads/dev
| 2023-08-04T14:33:11.890759
| 2023-07-16T11:19:18
| 2023-07-16T11:19:18
| 23,225,199
| 1,190
| 182
|
NOASSERTION
| 2023-07-18T00:16:44
| 2014-08-22T12:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 13,164
|
py
|
meta.py
|
"""
pyexcel.internal.meta
~~~~~~~~~~~~~~~~~~~~~~
    Annotate sheet and book class attributes
:copyright: (c) 2015-2022 by Onni Software Ltd.
:license: New BSD License
"""
import sys
from functools import partial
from pyexcel import constants
from pyexcel import docstrings as docs
from pyexcel._compact import PY2, append_doc
from pyexcel.internal import SOURCE
from pyexcel.internal.core import save_book, save_sheet, get_sheet_stream
from pyexcel.internal.utils import make_a_property
def make_presenter(source_getter, attribute=None):
"""make a custom presentation method for each file types"""
def custom_presenter(self, **keywords):
"""docstring is assigned a few lines down the line"""
keyword = SOURCE.get_keyword_for_parameter(attribute)
keywords[keyword] = attribute
memory_source = source_getter(**keywords)
memory_source.write_data(self)
try:
content_stream = memory_source.get_content()
content = content_stream.getvalue()
except AttributeError:
            # python 3 _io.TextIOWrapper
content = None
return content
custom_presenter.__doc__ = "Get data in %s format" % attribute
return custom_presenter
def sheet_presenter(attribute=None):
"""make a custom presentation method for sheet"""
source_getter = SOURCE.get_writable_source
return make_presenter(source_getter, attribute)
def book_presenter(attribute=None):
"""make a custom presentation method for book"""
source_getter = SOURCE.get_writable_book_source
return make_presenter(source_getter, attribute)
def importer(attribute=None):
"""make a custom input method for sheet"""
def custom_importer1(self, content, **keywords):
"""docstring is assigned a few lines down the line"""
sheet_params = {}
for field in constants.VALID_SHEET_PARAMETERS:
if field in keywords:
sheet_params[field] = keywords.pop(field)
keyword = SOURCE.get_keyword_for_parameter(attribute)
if keyword == "file_type":
keywords[keyword] = attribute
keywords["file_content"] = content
else:
keywords[keyword] = content
named_content = get_sheet_stream(**keywords)
self.init(named_content.payload, named_content.name, **sheet_params)
custom_importer1.__doc__ = "Set data in %s format" % attribute
return custom_importer1
def book_importer(attribute=None):
"""make a custom input method for book"""
def custom_book_importer(self, content, **keywords):
"""docstring is assigned a few lines down the line"""
keyword = SOURCE.get_keyword_for_parameter(attribute)
if keyword == "file_type":
keywords[keyword] = attribute
keywords["file_content"] = content
else:
keywords[keyword] = content
sheets, filename, path = _get_book(**keywords)
self.init(sheets=sheets, filename=filename, path=path)
custom_book_importer.__doc__ = "Set data in %s format" % attribute
return custom_book_importer
def attribute(
cls,
file_type,
instance_name="Sheet",
description=constants.OUT_FILE_TYPE_DOC_STRING,
**keywords
):
"""
create custom attributes for each class
"""
doc_string = description.format(file_type, instance_name)
make_a_property(cls, file_type, doc_string, **keywords)
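# The partials below pre-bind the getter/setter factories above, so plugins
# can register a new file type as a property (and/or setter) on Sheet or Book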
REGISTER_PRESENTATION = partial(
attribute,
getter_func=sheet_presenter,
description=constants.OUT_FILE_TYPE_DOC_STRING,
)
REGISTER_BOOK_PRESENTATION = partial(
attribute,
getter_func=book_presenter,
instance_name="Book",
description=constants.OUT_FILE_TYPE_DOC_STRING,
)
REGISTER_INPUT = partial(
attribute,
setter_func=importer,
description=constants.IN_FILE_TYPE_DOC_STRING,
)
REGISTER_BOOK_INPUT = partial(
attribute,
instance_name="Book",
setter_func=book_importer,
description=constants.IN_FILE_TYPE_DOC_STRING,
)
REGISTER_IO = partial(
attribute,
getter_func=sheet_presenter,
setter_func=importer,
description=constants.IO_FILE_TYPE_DOC_STRING,
)
REGISTER_BOOK_IO = partial(
attribute,
getter_func=book_presenter,
setter_func=book_importer,
instance_name="Book",
description=constants.IO_FILE_TYPE_DOC_STRING,
)
class StreamAttribute():
"""Provide access to get_*_stream methods"""
def __init__(self, cls):
self.cls = cls
def __getattr__(self, name):
getter = getattr(self.cls, "save_to_memory")
return getter(file_type=name)
class PyexcelObject():
"""parent class for pyexcel.Sheet and pyexcel.Book"""
@property
def stream(self):
"""Return a stream in which the content is properly encoded
Example::
>>> import pyexcel as p
>>> b = p.get_book(bookdict={"A": [[1]]})
>>> csv_stream = b.stream.texttable
>>> print(csv_stream.getvalue())
A:
+---+
| 1 |
+---+
        Where b.stream.xls.getvalue() is equivalent to b.xls. In some
        situations b.stream.xls is preferred over b.xls.
Sheet examples::
>>> import pyexcel as p
>>> s = p.Sheet([[1]], 'A')
>>> csv_stream = s.stream.texttable
>>> print(csv_stream.getvalue())
A:
+---+
| 1 |
+---+
        Where s.stream.xls.getvalue() is equivalent to s.xls. In some
        situations s.stream.xls is preferred over s.xls.
It is similar to :meth:`~pyexcel.Book.save_to_memory`.
"""
return StreamAttribute(self)
def save_to_memory(self, file_type, **keywords):
"""Save the content to memory
:param file_type: any value of 'csv', 'tsv', 'csvz',
                          'tsvz', 'xls', 'xlsx', 'xlsm', 'ods'
:param stream: the memory stream to be written to. Note in
Python 3, for csv and tsv format, please
pass an instance of StringIO. For xls, xlsx,
and ods, an instance of BytesIO.
"""
raise NotImplementedError("save to memory is not implemented")
def plot(self, file_type="svg", **keywords):
"""
Visualize the data
Parameters:
-----------------
file_type:string
'svg' by default. 'png', 'jpeg' possible depending on plugins
chart_type:string
'bar' by default. other chart types are subjected to plugins.
"""
memory_content = self.save_to_memory(file_type, **keywords)
if file_type in ["png", "svg", "jpeg"]:
            # make the signature for jupyter notebook
def get_content(self):
return self.getvalue().decode("utf-8")
setattr(
memory_content,
"_repr_%s_" % file_type,
partial(get_content, memory_content),
)
return memory_content
def _repr_html_(self):
return self.html
def __repr__(self):
if PY2:
default_encoding = sys.getdefaultencoding()
if default_encoding == "ascii":
result = self.texttable
return result.encode("utf-8")
return self.texttable
def __str__(self):
return self.__repr__()
class SheetMeta(PyexcelObject):
"""Annotate sheet attributes"""
register_io = classmethod(REGISTER_IO)
register_presentation = classmethod(REGISTER_PRESENTATION)
register_input = classmethod(REGISTER_INPUT)
@append_doc(docs.SAVE_AS_OPTIONS)
def save_as(self, filename, **keywords):
"""Save the content to a named file"""
return save_sheet(self, file_name=filename, **keywords)
def save_to_memory(self, file_type, stream=None, **keywords):
stream = save_sheet(
self, file_type=file_type, file_stream=stream, **keywords
)
return stream
def save_to_django_model(
self, model, initializer=None, mapdict=None, batch_size=None
):
"""Save to database table through django model
:param model: a database model
        :param initializer: an initialization function for your model
:param mapdict: custom map dictionary for your data columns
:param batch_size: a parameter to Django concerning the size
for bulk insertion
"""
save_sheet(
self,
model=model,
initializer=initializer,
mapdict=mapdict,
batch_size=batch_size,
)
def save_to_database(
self, session, table, initializer=None, mapdict=None, auto_commit=True
):
"""Save data in sheet to database table
:param session: database session
:param table: a database table
        :param initializer: an initialization function for your table
:param mapdict: custom map dictionary for your data columns
:param auto_commit: by default, data is auto committed.
"""
save_sheet(
self,
session=session,
table=table,
initializer=initializer,
mapdict=mapdict,
auto_commit=auto_commit,
)
class BookMeta(PyexcelObject):
"""Annotate book attributes"""
register_io = classmethod(REGISTER_BOOK_IO)
register_presentation = classmethod(REGISTER_BOOK_PRESENTATION)
register_input = classmethod(REGISTER_BOOK_INPUT)
@append_doc(docs.SAVE_AS_OPTIONS)
def save_as(self, filename, **keywords):
"""
Save the content to a new file
"""
return save_book(self, file_name=filename, **keywords)
def save_to_memory(self, file_type, stream=None, **keywords):
"""
Save the content to a memory stream
:param file_type: what format the stream is in
:param stream: a memory stream. Note in Python 3, for csv and tsv
format, please pass an instance of StringIO. For xls,
xlsx, and ods, an instance of BytesIO.
"""
stream = save_book(
self, file_type=file_type, file_stream=stream, **keywords
)
return stream
def save_to_django_models(
self, models, initializers=None, mapdicts=None, **keywords
):
"""
Save to database table through django model
        :param models: a list of database models, as accepted by
                       :meth:`Sheet.save_to_django_model`. The sequence
                       of tables matters when there are dependencies
                       between the tables. For example, **Car** is made
                       by **Car Maker**, so the **Car Maker** table should
                       be specified before the **Car** table.
        :param initializers: a list of initialization functions for your
                             tables; the sequence should match the tables
        :param mapdicts: custom map dictionaries for your data columns;
                         the sequence should match the tables
optional parameters:
:param batch_size: django bulk_create batch size
:param bulk_save: whether to use bulk_create or to use single save
per record
"""
save_book(
self,
models=models,
initializers=initializers,
mapdicts=mapdicts,
**keywords
)
def save_to_database(
self,
session,
tables,
initializers=None,
mapdicts=None,
auto_commit=True,
):
"""
Save data in sheets to database tables
:param session: database session
        :param tables: a list of database tables, as accepted by
                       :meth:`Sheet.save_to_database`. The sequence of tables
                       matters when there are dependencies between the
                       tables. For example, **Car** is made by **Car Maker**,
                       so the **Car Maker** table should
                       be specified before the **Car** table.
        :param initializers: a list of initialization functions for your
                             tables; the sequence should match the tables
        :param mapdicts: custom map dictionaries for your data columns;
                         the sequence should match the tables
:param auto_commit: by default, data is committed.
"""
save_book(
self,
session=session,
tables=tables,
initializers=initializers,
mapdicts=mapdicts,
auto_commit=auto_commit,
)
def _get_book(**keywords):
"""Get an instance of :class:`Book` from an excel source
    If a dictionary source is given, it should have text as keys and
    two-dimensional arrays as values.
"""
a_source = SOURCE.get_book_source(**keywords)
sheets = a_source.get_data()
filename, path = a_source.get_source_info()
return sheets, filename, path
|
1fc7497831427755c8b56eb433d564f445fc3e2e
|
2258118ac438188696e058b821aa5e8166d94f82
|
/pre/ShotDetection/shotdetect/shot_detector.py
|
1d28456a2a391735b8b23bcc95781c7b49bf48a6
|
[
"BSD-3-Clause"
] |
permissive
|
AnyiRao/SceneSeg
|
4c5ea7f6823e9006d0df21702a7ab69e8be17a3d
|
87adf90160a1f490e5c49732227c500d38a7d933
|
refs/heads/master
| 2023-08-16T22:31:41.814091
| 2023-08-14T22:20:05
| 2023-08-14T22:20:05
| 245,794,561
| 210
| 46
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
shot_detector.py
|
# The codes below partially refer to the PySceneDetect. According
# to its BSD 3-Clause License, we keep the following.
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/PySceneDetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2014-2021 Brandon Castellano <http://www.bcastell.com>.
# pylint: disable=unused-argument, no-self-use
class shotDetector(object):
""" Base class to inheret from when implementing a shot detection algorithm.
Also see the implemented shot detectors in the shotdetect.detectors module
to get an idea of how a particular detector can be created.
"""
stats_manager = None
""" Optional :py:class:`StatsManager <shotdetect.stats_manager.StatsManager>` to
use for caching frame metrics to and from."""
_metric_keys = []
""" List of frame metric keys to be registered with the :py:attr:`stats_manager`,
if available. """
cli_name = 'detect-none'
""" Name of detector to use in command-line interface description. """
def is_processing_required(self, frame_num):
""" Is Processing Required: Test if all calculations for a given frame are already done.
Returns:
bool: False if the shotDetector has assigned _metric_keys, and the
stats_manager property is set to a valid StatsManager object containing
the required frame metrics/calculations for the given frame - thus, not
needing the frame to perform shot detection.
True otherwise (i.e. the frame_img passed to process_frame is required
to be passed to process_frame for the given frame_num).
"""
return not self._metric_keys or not (
self.stats_manager is not None and
self.stats_manager.metrics_exist(frame_num, self._metric_keys))
def get_metrics(self):
""" Get Metrics: Get a list of all metric names/keys used by the detector.
Returns:
List[str]: A list of strings of frame metric key names that will be used by
the detector when a StatsManager is passed to process_frame.
"""
return self._metric_keys
def process_frame(self, frame_num, frame_img):
""" Process Frame: Computes/stores metrics and detects any shot changes.
Prototype method, no actual detection.
Returns:
List[int]: List of frame numbers of cuts to be added to the cutting list.
"""
return []
def post_process(self, frame_num):
""" Post Process: Performs any processing after the last frame has been read.
Prototype method, no actual detection.
Returns:
List[int]: List of frame numbers of cuts to be added to the cutting list.
"""
return []
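# A minimal subclass sketch (illustrative only -- see shotdetect.detectors
# for the real implementations):
#
# class MyDetector(shotDetector):
#     _metric_keys = ['frame_metric']
#
#     def process_frame(self, frame_num, frame_img):
#         # compute a metric for frame_img, compare it against a threshold,
#         # and return [frame_num] when a cut is detected
#         return []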
|
60ede307a0d9ba940e3441ac4b7bfcdcf54b7041
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/Heppy/python/analyzers/examples/ntuple.py
|
9b398d871770235b35314e26c7ebf5b8ed6785af
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,155
|
py
|
ntuple.py
|
#!/bin/env python
from __future__ import print_function
def var( tree, varName, type=float ):
tree.var(varName, type)
def fill( tree, varName, value ):
tree.fill( varName, value )
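# The var/fill pairs below keep booking and filling symmetric: every
# bookX(tree, ...) declares exactly the branches that fillX(tree, ...) sets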
# event information
def bookEvent(tree):
var(tree, 'run')
var(tree, 'lumi')
var(tree, 'event')
def fillEvent(tree, event):
fill(tree, 'run', event.run)
fill(tree, 'lumi', event.lumi)
fill(tree, 'event', event.eventId)
# simple particle
def bookParticle( tree, pName ):
var(tree, '{pName}_pdgid'.format(pName=pName))
var(tree, '{pName}_e'.format(pName=pName))
var(tree, '{pName}_pt'.format(pName=pName))
var(tree, '{pName}_eta'.format(pName=pName))
var(tree, '{pName}_phi'.format(pName=pName))
var(tree, '{pName}_m'.format(pName=pName))
var(tree, '{pName}_q'.format(pName=pName))
def fillParticle( tree, pName, particle ):
fill(tree, '{pName}_pdgid'.format(pName=pName), particle.pdgId() )
fill(tree, '{pName}_e'.format(pName=pName), particle.energy() )
fill(tree, '{pName}_pt'.format(pName=pName), particle.pt() )
fill(tree, '{pName}_eta'.format(pName=pName), particle.eta() )
fill(tree, '{pName}_phi'.format(pName=pName), particle.phi() )
fill(tree, '{pName}_m'.format(pName=pName), particle.mass() )
fill(tree, '{pName}_q'.format(pName=pName), particle.charge() )
def bookMet(tree, pName):
var(tree, '{pName}_pt'.format(pName=pName))
var(tree, '{pName}_phi'.format(pName=pName))
var(tree, '{pName}_sumet'.format(pName=pName))
def fillMet(tree, pName, met):
fill(tree, '{pName}_pt'.format(pName=pName), met.pt())
fill(tree, '{pName}_phi'.format(pName=pName), met.phi())
fill(tree, '{pName}_sumet'.format(pName=pName), met.sumEt())
def bookGenTau(tree, pName, pfdiscs, calodiscs):
bookJet(tree, pName)
bookTau(tree, '{pName}_calo'.format(pName=pName), calodiscs)
bookTau(tree, '{pName}_pf'.format(pName=pName), pfdiscs)
bookJet(tree, '{pName}_pfjet'.format(pName=pName))
def fillGenTau(tree, pName, tau):
fillJet(tree, pName, tau)
fillTau(tree, '{pName}_calo'.format(pName=pName), tau.match_calo)
fillTau(tree, '{pName}_pf'.format(pName=pName), tau.match_pf)
fillJet(tree, '{pName}_pfjet'.format(pName=pName), tau.match_pfjet)
def bookTau(tree, pName, discNames):
bookParticle(tree, pName)
var(tree, '{pName}_nsigcharged'.format(pName=pName))
var(tree, '{pName}_isolation'.format(pName=pName))
for discName in discNames:
var(tree, '{pName}_{disc}'.format(pName=pName,
disc=discName))
def fillTau(tree, pName, tau):
if not tau: return
fillParticle(tree, pName, tau)
fill(tree, '{pName}_nsigcharged'.format(pName=pName), len(tau.signalCharged()))
fill(tree, '{pName}_isolation'.format(pName=pName), tau.isolation())
for discName, value in tau.discs.items():
fill(tree, '{pName}_{disc}'.format(pName=pName,
disc=discName), value)
# jet
def bookComponent( tree, pName ):
var(tree, '{pName}_e'.format(pName=pName))
var(tree, '{pName}_pt'.format(pName=pName))
var(tree, '{pName}_num'.format(pName=pName))
def fillComponent(tree, pName, component):
fill(tree, '{pName}_e'.format(pName=pName), component.e() )
fill(tree, '{pName}_pt'.format(pName=pName), component.pt() )
fill(tree, '{pName}_num'.format(pName=pName), component.num() )
pdgids = [211, 22, 130, 11, 13]
def bookJet( tree, pName ):
bookParticle(tree, pName )
for pdgid in pdgids:
bookComponent(tree, '{pName}_{pdgid:d}'.format(pName=pName, pdgid=pdgid))
# var(tree, '{pName}_npart'.format(pName=pName))
def fillJet( tree, pName, jet ):
if not jet: return
fillParticle(tree, pName, jet )
for pdgid in pdgids:
component = jet.constituents.get(pdgid, None)
if component is not None:
fillComponent(tree,
'{pName}_{pdgid:d}'.format(pName=pName, pdgid=pdgid),
component )
else:
import pdb; pdb.set_trace()
print(jet)
|
b106f2ec0ed1498c76f348336c22370dc2f4fc99
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/minigame/IceTreasure.py
|
84b4e519c9084763ad620be6838f813825445129
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
IceTreasure.py
|
from panda3d.core import Point3, CollisionSphere, CollisionNode, BitMask32
from direct.interval.IntervalGlobal import Sequence, LerpScaleInterval, Parallel, Func, SoundInterval
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from toontown.battle import BattleParticles
class IceTreasure(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('IceTreasure')
RADIUS = 1.0
def __init__(self, model, pos, serialNum, gameId, penalty = False):
self.serialNum = serialNum
self.penalty = penalty
center = model.getBounds().getCenter()
center = Point3(0, 0, 0)
self.nodePath = model.copyTo(render)
self.nodePath.setPos(pos[0] - center[0], pos[1] - center[1], pos[2] - center[2])
self.nodePath.setZ(0)
self.notify.debug('newPos = %s' % self.nodePath.getPos())
if self.penalty:
self.sphereName = 'penaltySphere-%s-%s' % (gameId, self.serialNum)
else:
self.sphereName = 'treasureSphere-%s-%s' % (gameId, self.serialNum)
self.collSphere = CollisionSphere(center[0], center[1], center[2], self.RADIUS)
self.collSphere.setTangible(0)
self.collNode = CollisionNode(self.sphereName)
self.collNode.setIntoCollideMask(ToontownGlobals.PieBitmask)
self.collNode.addSolid(self.collSphere)
self.collNodePath = render.attachNewNode(self.collNode)
self.collNodePath.setPos(pos[0] - center[0], pos[1] - center[1], pos[2] - center[2])
self.collNodePath.hide()
self.track = None
if self.penalty:
self.tip = self.nodePath.find('**/fusetip')
sparks = BattleParticles.createParticleEffect(file='icetnt')
self.sparksEffect = sparks
sparks.start(self.tip)
self.penaltyGrabSound = loader.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.ogg')
self.penaltyGrabSound.setVolume(0.75)
kaboomAttachPoint = self.nodePath.attachNewNode('kaboomAttach')
kaboomAttachPoint.setZ(3)
self.kaboom = loader.loadModel('phase_4/models/minigames/ice_game_kaboom')
self.kaboom.reparentTo(kaboomAttachPoint)
self.kaboom.setScale(2.0)
self.kaboom.setBillboardPointEye()
return
def destroy(self):
self.ignoreAll()
if self.penalty:
self.sparksEffect.cleanup()
if self.track:
self.track.finish()
self.nodePath.removeNode()
del self.nodePath
del self.collSphere
self.collNodePath.removeNode()
del self.collNodePath
del self.collNode
def showGrab(self):
self.nodePath.hide()
self.collNodePath.hide()
self.collNode.setIntoCollideMask(BitMask32(0))
if self.penalty:
self.track = Parallel(SoundInterval(self.penaltyGrabSound), Sequence(Func(self.kaboom.showThrough), LerpScaleInterval(self.kaboom, duration=0.5, scale=Point3(10, 10, 10), blendType='easeOut'), Func(self.kaboom.hide)))
self.track.start()
|
efacbbde56b67f21546a24bd366b5ff7ca41d31c
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/starboard/tools/win_symlink_fast.py
|
8c5e9407b174c2bfadcb5d447f04dfd6ac918140
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 7,216
|
py
|
win_symlink_fast.py
|
#!/usr/bin/python
# Copyright 2019 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions for symlinking on Windows."""
import ctypes
from ctypes import wintypes
import os
DWORD = wintypes.DWORD
LPCWSTR = wintypes.LPCWSTR
HANDLE = wintypes.HANDLE
LPVOID = wintypes.LPVOID
BOOL = wintypes.BOOL
USHORT = wintypes.USHORT
ULONG = wintypes.ULONG
WCHAR = wintypes.WCHAR
kernel32 = ctypes.windll.kernel32
LPDWORD = ctypes.POINTER(DWORD)
UCHAR = ctypes.c_ubyte
GetFileAttributesW = kernel32.GetFileAttributesW
GetFileAttributesW.restype = DWORD
GetFileAttributesW.argtypes = (LPCWSTR,) # lpFileName In
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x00400
CreateFileW = kernel32.CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.argtypes = (
LPCWSTR, # lpFileName In
DWORD, # dwDesiredAccess In
DWORD, # dwShareMode In
LPVOID, # lpSecurityAttributes In_opt
DWORD, # dwCreationDisposition In
DWORD, # dwFlagsAndAttributes In
HANDLE) # hTemplateFile In_opt
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = (HANDLE,) # hObject In
INVALID_HANDLE_VALUE = HANDLE(-1).value # pylint:disable=invalid-name
OPEN_EXISTING = 3
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
DeviceIoControl = kernel32.DeviceIoControl
DeviceIoControl.restype = BOOL
DeviceIoControl.argtypes = (
HANDLE, # hDevice In
DWORD, # dwIoControlCode In
LPVOID, # lpInBuffer In_opt
DWORD, # nInBufferSize In
LPVOID, # lpOutBuffer Out_opt
DWORD, # nOutBufferSize In
LPDWORD, # lpBytesReturned Out_opt
LPVOID) # lpOverlapped Inout_opt
FSCTL_GET_REPARSE_POINT = 0x000900A8
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
IO_REPARSE_TAG_SYMLINK = 0xA000000C
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 0x4000
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
# Developer Mode must be enabled in order to use the following flag.
SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2
SYMLINK_FLAG_RELATIVE = 0x1
class GenericReparseBuffer(ctypes.Structure):
"""Win32 api data structure."""
_fields_ = (('DataBuffer', UCHAR * 1),)
class SymbolicLinkReparseBuffer(ctypes.Structure):
"""Win32 api data structure."""
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT), ('PrintNameOffset', USHORT),
('PrintNameLength', USHORT), ('Flags', ULONG), ('PathBuffer',
WCHAR * 1))
@property
def print_name(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(ctypes.addressof(self) + offset).value
@property
def substitute_name(self):
arrayt = WCHAR * (self.SubstituteNameLength // 2)
offset = type(self).PathBuffer.offset + self.SubstituteNameOffset
return arrayt.from_address(ctypes.addressof(self) + offset).value
@property
def is_relative_path(self):
return bool(self.Flags & SYMLINK_FLAG_RELATIVE)
class MountPointReparseBuffer(ctypes.Structure):
"""Win32 api data structure."""
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT), ('PrintNameOffset', USHORT),
('PrintNameLength', USHORT), ('PathBuffer', WCHAR * 1))
@property
def print_name(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(ctypes.addressof(self) + offset).value
@property
def substitute_name(self):
arrayt = WCHAR * (self.SubstituteNameLength // 2)
offset = type(self).PathBuffer.offset + self.SubstituteNameOffset
return arrayt.from_address(ctypes.addressof(self) + offset).value
class ReparseDataBuffer(ctypes.Structure):
"""Win32 api data structure."""
class ReparseBuffer(ctypes.Union):
"""Win32 api data structure."""
_fields_ = (('SymbolicLinkReparseBuffer', SymbolicLinkReparseBuffer),
('MountPointReparseBuffer', MountPointReparseBuffer),
('GenericReparseBuffer', GenericReparseBuffer))
_fields_ = (('ReparseTag', ULONG), ('ReparseDataLength', USHORT),
('Reserved', USHORT), ('ReparseBuffer', ReparseBuffer))
_anonymous_ = ('ReparseBuffer',)
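# ReparseDataBuffer mirrors the Win32 REPARSE_DATA_BUFFER: ReparseTag selects
# which union member is valid, which FastReadReparseLink uses below to pick
# the symlink or mount-point print name.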
def _ToUnicode(s):
try:
return s.decode('utf-8')
except AttributeError:
return s
_kdll = None
def _GetKernel32Dll():
global _kdll
if _kdll:
return _kdll
_kdll = ctypes.windll.LoadLibrary('kernel32.dll')
return _kdll
def FastCreateReparseLink(from_folder, link_folder):
"""Creates a reparse link.
Args:
from_folder: The folder that the link will point to.
link_folder: The path of the link to be created.
Returns:
None
Raises:
OSError: if link cannot be created
"""
from_folder = _ToUnicode(from_folder)
link_folder = _ToUnicode(link_folder)
par_dir = os.path.dirname(link_folder)
if not os.path.isdir(par_dir):
os.makedirs(par_dir)
kdll = _GetKernel32Dll()
# Only supported from Windows 10 Insiders build 14972
flags = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE | \
SYMBOLIC_LINK_FLAG_DIRECTORY
ok = kdll.CreateSymbolicLinkW(link_folder, from_folder, flags)
if not ok or not FastIsReparseLink(link_folder):
raise OSError('Could not create sym link ' + link_folder + ' to ' +
from_folder)
def FastIsReparseLink(path):
path = _ToUnicode(path)
result = GetFileAttributesW(path)
if result == INVALID_FILE_ATTRIBUTES:
return False
return bool(result & FILE_ATTRIBUTE_REPARSE_POINT)
def FastReadReparseLink(path):
"""See api docstring, above."""
path = _ToUnicode(path)
reparse_point_handle = CreateFileW(
path, 0, 0, None, OPEN_EXISTING,
FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, None)
if reparse_point_handle == INVALID_HANDLE_VALUE:
return None
# Remove false positive below.
# pylint: disable=deprecated-method
target_buffer = ctypes.c_buffer(MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
n_bytes_returned = DWORD()
io_result = DeviceIoControl(reparse_point_handle, FSCTL_GET_REPARSE_POINT,
None, 0, target_buffer, len(target_buffer),
ctypes.byref(n_bytes_returned), None)
CloseHandle(reparse_point_handle)
if not io_result:
return None
rdb = ReparseDataBuffer.from_buffer(target_buffer)
if rdb.ReparseTag == IO_REPARSE_TAG_SYMLINK:
return rdb.SymbolicLinkReparseBuffer.print_name
elif rdb.ReparseTag == IO_REPARSE_TAG_MOUNT_POINT:
return rdb.MountPointReparseBuffer.print_name
return None
|
6f3c8310f0275cf0f7a072e7aa0457c0182aaee9
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/tools/history-viz.py
|
4a4d787826b5435f7793ea917990b9f708ad72f9
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 8,007
|
py
|
history-viz.py
|
#!/usr/bin/env python
# Copyright 2011 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Process a History database and dump a .dot file suitable for GraphViz.
This is useful for debugging history redirect flows.
An example run of this program:
python /src/history-viz.py History > foo.dot
/c/Program\ Files/Graphviz2.18/bin/dot -Tpng foo.dot -o foo.png
"""
import struct
import subprocess
import sys
import urlparse
# Some transition types, copied from page_transition_types.h.
TRANS_TYPES = {
0: 'link',
1: 'typed',
2: 'most-visited',
3: 'auto subframe',
7: 'form',
}
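# The low byte of a transition value holds the base type above; the high bits
# are flags decoded in main(): 0xC0000000 redirect, 0x10000000 chain start,
# 0x20000000 chain end.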
class URL(object):
"""Represents a broken-down URL from our most visited database."""
def __init__(self, id, url):
"""Initialize a new URL object. |id| is the database id of the URL."""
self.id = id
self.url = url
scheme, loc, path, query, fragment = urlparse.urlsplit(url)
if scheme == 'http':
scheme = '' # Shorten for display purposes.
if len(scheme) > 0:
scheme += '://'
self.host = scheme + loc
self.path = path
extra = ''
if len(query) > 0:
extra += '?' + query
if len(fragment) > 0 or url.find('#') > 0:
extra += '#' + fragment
self.extra = extra
def PrettyPrint(self, include_host=True, include_path=True):
"""Pretty-print this URL in a form more suitable for the graph.
    This will elide very long paths and potentially put newlines between parts
of long components. include_host and include_path determine whether to
include the host and path in the output.
Returns: the pretty-printed string."""
MAX_LEN = 30 # Maximum length of a line in the output.
parts = []
if include_host:
parts.append(self.host)
if include_path:
parts.append(self.path)
parts.append(self.extra)
lines = []
line = ''
for part in parts:
if len(part) > MAX_LEN:
part = part[0:MAX_LEN-3] + '...'
if len(line)+len(part) > MAX_LEN:
lines.append(line)
line = ''
line += part
if len(line) > 0:
lines.append(line)
return '\n'.join(lines)
class Edge(object):
"""Represents an edge in the history graph, connecting two pages.
If a link is traversed twice, it is one Edge with two entries in
the .transitions array."""
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.transitions = []
def Transitions(self):
"""Return a dictionary mapping transition type -> occurences."""
all = {}
for trans in self.transitions:
all[trans] = all.get(trans, 0) + 1
# We currently don't use the chain type.
# TODO(evanm): make this a command-line option.
# if trans & 0x30000000 != 0:
# chain = ''
# if trans & 0x10000000:
# chain = 'start'
# if trans & 0x20000000:
# if len(chain) == 0:
# chain = 'end'
# else:
# chain = ''
# if len(chain) > 0:
# edge['chain'] = chain
return all
def ClusterBy(objs, pred):
"""Group a list of objects by a predicate.
Given a list of objects and a predicate over the objects, return a
dictionary mapping pred(obj) -> all objs with the same pred(obj)."""
clusters = {}
for obj in objs:
cluster = pred(obj)
clusters[cluster] = clusters.get(cluster, [])
clusters[cluster].append(obj)
return clusters
def EscapeDot(string):
"""Escape a string suitable for embedding in a graphviz graph."""
# TODO(evanm): this is likely not sufficient.
return string.replace('\n', '\\n')
class SQLite(object):
"""Trivial interface to executing SQLite queries.
Spawns a new process with each call."""
def __init__(self, file=None):
self.file = file
def Run(self, sql):
"""Execute |sql|, yielding each row of results as an array."""
subproc = subprocess.Popen(['sqlite', self.file],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
subproc.stdin.write('.mode tabs\n')
subproc.stdin.write(sql + ';')
subproc.stdin.close()
for line in subproc.stdout:
row = line.strip().split('\t')
yield row
def LoadHistory(filename):
db = SQLite(filename)
urls = {} # Map of urlid => url.
urls['0'] = URL('0', 'start') # Node name '0' is our special 'start' node.
for id, url in db.Run('SELECT id, url FROM urls'):
urls[id] = URL(id, url)
visiturlids = {} # Map of visitid => urlid.
visiturlids['0'] = '0' # '0' is our special 'start' node.
edges = {} # Map of urlid->urlid->Edge.
for src, dst, url, trans in db.Run('SELECT from_visit, id, url, transition '
'FROM visits ORDER BY id'):
visiturlids[dst] = url
src = visiturlids[src]
dst = visiturlids[dst]
edges[src] = edges.get(src, {})
edge = edges[src][dst] = edges[src].get(dst, Edge(src, dst))
# SQLite outputs transition values as signed integers, but they're really
# a bitfield. Below does "unsigned trans = static_cast<unsigned>(trans)".
trans = struct.unpack('I', struct.pack('i', int(trans)))[0]
edge.transitions.append(trans)
return urls, edges
def main():
urls, edges = LoadHistory(sys.argv[1])
print 'digraph G {'
print ' graph [rankdir=LR]' # Display left to right.
print ' node [shape=box]' # Display nodes as boxes.
print ' subgraph { rank=source; 0 [label="start"] }'
# Output all the nodes within graph clusters.
hosts = ClusterBy(urls.values(), lambda url: url.host)
for i, (host, urls) in enumerate(hosts.items()):
# Cluster all URLs under this host if it has more than one entry.
host_clustered = len(urls) > 1
if host_clustered:
print 'subgraph clusterhost%d {' % i
print ' label="%s"' % host
paths = ClusterBy(urls, lambda url: url.path)
for j, (path, urls) in enumerate(paths.items()):
      # Cluster all URLs under this path if it has more than one entry.
path_clustered = host_clustered and len(urls) > 1
if path_clustered:
print ' subgraph cluster%d%d {' % (i, j)
print ' label="%s"' % path
for url in urls:
if url.id == '0': continue # We already output the special start node.
pretty = url.PrettyPrint(include_host=not host_clustered,
include_path=not path_clustered)
print ' %s [label="%s"]' % (url.id, EscapeDot(pretty))
if path_clustered:
print ' }'
if host_clustered:
print '}'
# Output all the edges between nodes.
for src, dsts in edges.items():
for dst, edge in dsts.items():
# Gather up all the transitions into the label.
label = [] # Label for the edge.
transitions = edge.Transitions()
for trans, count in transitions.items():
text = ''
if count > 1:
text = '%dx ' % count
base_type = trans & 0xFF
redir = (trans & 0xC0000000) != 0
start = (trans & 0x10000000) != 0
end = (trans & 0x20000000) != 0
if start or end:
if start:
text += '<'
if end:
text += '>'
text += ' '
if redir:
text += 'R '
text += TRANS_TYPES.get(base_type, 'trans%d' % base_type)
label.append(text)
if len(label) == 0:
continue
edgeattrs = [] # Graphviz attributes for the edge.
# If the edge is from the start and the transitions are fishy, make it
# display as a dotted line.
if src == '0' and len(transitions.keys()) == 1 and 0 in transitions:
edgeattrs.append('style=dashed')
if len(label) > 0:
edgeattrs.append('label="%s"' % EscapeDot('\n'.join(label)))
out = '%s -> %s' % (src, dst)
if len(edgeattrs) > 0:
out += ' [%s]' % ','.join(edgeattrs)
print out
print '}'
return 0
if __name__ == '__main__':
sys.exit(main())
|
e1ce1f32dae73160a5a64838c2353e329828cb3a
|
642ba1746fed0b722a127b8426eca987df6efc61
|
/share/lib/python/neuron/rxd/__init__.py
|
6685bd28fb73b00d87156ae494aa49c285d42ab6
|
[
"BSD-3-Clause"
] |
permissive
|
neuronsimulator/nrn
|
23781d978fe9253b0e3543f41e27252532b35459
|
b786c36d715ba0f6da1ba8bdf5d2338c939ecf51
|
refs/heads/master
| 2023-08-09T00:13:11.123525
| 2023-08-04T13:11:02
| 2023-08-04T13:11:02
| 71,627,569
| 313
| 171
|
NOASSERTION
| 2023-09-14T17:48:03
| 2016-10-22T08:47:37
|
C++
|
UTF-8
|
Python
| false
| false
| 4,240
|
py
|
__init__.py
|
from .rxdException import RxDException
# import sys
# if 'neuron.rxd' in sys.modules:
# raise RxDException('NEURON CRxD module cannot be used with NEURON RxD module.')
from . import rxd, constants
from .species import Species, Parameter, State
from .region import Region, Extracellular
from .rate import Rate
from .reaction import Reaction
from . import geometry
from .multiCompartmentReaction import MultiCompartmentReaction
from .rxd import re_init, set_solve_type, nthread
from .rxdmath import v
try:
from . import dimension3
except:
pass
from .rangevar import RangeVar
from .geometry import (
membrane,
inside,
Shell,
FractionalVolume,
FixedCrossSection,
FixedPerimeter,
ScalableBorder,
DistributedBoundary,
MultipleGeometry,
)
from .plugins import set_solver
# deprecated:
# from geometry import ConstantArea, ConstantVolume
# TODO: if we ever separate Parameter and State from species, then we need to
# remember to call rxd._do_nbs_register()
def _model_view(tree):
from . import species
from neuron import h
species_dict = species._get_all_species()
if "TreeViewItem" not in dir(h):
return
if species_dict:
rxd_head = h.TreeViewItem(None, "Reaction Diffusion Items")
# TODO: do the species disappear if they go out of scope? or does this overcount?
rxd_species = h.TreeViewItem(
rxd_head, "%d Species/State/Parameter" % len(species_dict)
)
species_children = [
h.TreeViewItem(rxd_species, str(name)) for name in species_dict
]
rxd_reactions = h.TreeViewItem(
rxd_head,
"%d Reaction/Rate/MultiCompartmentReaction"
% len([r for r in rxd._all_reactions if r() is not None]),
)
tree.append(rxd_head)
def save_state():
"""return a bytestring representation of the current rxd state
Note: this is dependent on the order items were created."""
from . import species
import array
import itertools
import gzip
version = 0
state = []
num_species = 0
for sp in species._all_species:
s = sp()
if s is not None:
my_state = s._state
state.append(array.array("Q", [len(my_state)]).tobytes())
state.append(my_state)
num_species += 1
if num_species == 0:
return b""
data = gzip.compress(
array.array("Q", [num_species]).tobytes()
+ bytes(itertools.chain.from_iterable(state))
)
metadata = array.array("Q", [version, len(data)]).tobytes()
return metadata + data
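# Byte layout produced above (and consumed by restore_state below):
# a 16-byte header of two uint64s (version, compressed payload length),
# then gzip of uint64 num_species followed by a (uint64 length, raw state
# bytes) pair per species.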
def restore_state(oldstate):
"""restore rxd state from a bytestring
Note: this is dependent on the order items were created."""
from . import species
import array
import itertools
import gzip
if oldstate == b"":
for sp in species._all_species:
s = sp()
if s is not None:
raise RxDException("Invalid state data: inconsistent number of Species")
return
metadata = array.array("Q")
metadata.frombytes(oldstate[:16])
version, length = metadata
if version != 0:
raise RxDException("Unsupported state version")
# discard header and decompress remainder
if len(oldstate) != length + 16:
raise RxDException("Invalid state data: bad length")
oldstate = gzip.decompress(oldstate[16:])
metadata = array.array("Q")
metadata.frombytes(oldstate[:8])
num_species = metadata[0]
active_species = []
for sp in species._all_species:
s = sp()
if s is not None:
active_species.append(s)
if len(active_species) != num_species:
raise RxDException("Invalid state data: inconsistent number of Species")
position = 8
for sp in active_species:
data = array.array("d")
size_array = array.array("Q")
size_array.frombytes(oldstate[position : position + 8])
position += 8
size = size_array[0]
data.frombytes(oldstate[position : position + size])
position += size
sp._state = bytes(data)
if position != len(oldstate):
raise RxDException("Invalid state data: bad length")
|
0a974063c833d4e2535d6ace9dc2ee1109ed1cfd
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-VideoToolbox/PyObjCTest/test_vtutilities.py
|
0a2d398564a967e9b55b30e8e6d8ebb03683529a
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
test_vtutilities.py
|
import VideoToolbox
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestVTUtilities(TestCase):
@min_os_level("10.11")
def test_functions(self):
self.assertArgIsOut(VideoToolbox.VTCreateCGImageFromCVPixelBuffer, 2)
self.assertArgIsCFRetained(VideoToolbox.VTCreateCGImageFromCVPixelBuffer, 2)
@min_os_level("11.0")
def test_functions11_0(self):
VideoToolbox.VTRegisterSupplementalVideoDecoderIfAvailable
|
19e3038ac3810b1b8d257ac7280772dcfbbf376d
|
a53076722d9696422b2d9f8b6166c21ed7876607
|
/boltons/mathutils.py
|
bf7e95eff0af6d154c634fb5d2c743fa7a17ecc3
|
[
"BSD-3-Clause"
] |
permissive
|
mahmoud/boltons
|
e22ef2b596d64240a2cbd924aedaa8f9e17f0c8c
|
46599bc0d498dd8adc3aea833ce1445feed349dd
|
refs/heads/master
| 2023-09-04T07:43:37.357470
| 2023-05-06T17:25:38
| 2023-05-06T17:25:38
| 8,307,391
| 6,607
| 449
|
NOASSERTION
| 2023-09-07T02:09:03
| 2013-02-20T06:17:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,218
|
py
|
mathutils.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mahmoud Hashemi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module provides useful math functions on top of Python's
built-in :mod:`math` module.
"""
from __future__ import division
from math import ceil as _ceil, floor as _floor
import bisect
import binascii
def clamp(x, lower=float('-inf'), upper=float('inf')):
"""Limit a value to a given range.
Args:
x (int or float): Number to be clamped.
lower (int or float): Minimum value for x.
upper (int or float): Maximum value for x.
The returned value is guaranteed to be between *lower* and
*upper*. Integers, floats, and other comparable types can be
mixed.
>>> clamp(1.0, 0, 5)
1.0
>>> clamp(-1.0, 0, 5)
0
>>> clamp(101.0, 0, 5)
5
>>> clamp(123, upper=5)
5
Similar to `numpy's clip`_ function.
.. _numpy's clip: http://docs.scipy.org/doc/numpy/reference/generated/numpy.clip.html
"""
if upper < lower:
raise ValueError('expected upper bound (%r) >= lower bound (%r)'
% (upper, lower))
return min(max(x, lower), upper)
def ceil(x, options=None):
"""Return the ceiling of *x*. If *options* is set, return the smallest
integer or float from *options* that is greater than or equal to
*x*.
Args:
x (int or float): Number to be tested.
options (iterable): Optional iterable of arbitrary numbers
(ints or floats).
>>> VALID_CABLE_CSA = [1.5, 2.5, 4, 6, 10, 25, 35, 50]
>>> ceil(3.5, options=VALID_CABLE_CSA)
4
>>> ceil(4, options=VALID_CABLE_CSA)
4
"""
if options is None:
return _ceil(x)
options = sorted(options)
i = bisect.bisect_left(options, x)
if i == len(options):
raise ValueError("no ceil options greater than or equal to: %r" % x)
return options[i]
def floor(x, options=None):
"""Return the floor of *x*. If *options* is set, return the largest
integer or float from *options* that is less than or equal to
*x*.
Args:
x (int or float): Number to be tested.
options (iterable): Optional iterable of arbitrary numbers
(ints or floats).
>>> VALID_CABLE_CSA = [1.5, 2.5, 4, 6, 10, 25, 35, 50]
>>> floor(3.5, options=VALID_CABLE_CSA)
2.5
>>> floor(2.5, options=VALID_CABLE_CSA)
2.5
"""
if options is None:
return _floor(x)
options = sorted(options)
i = bisect.bisect_right(options, x)
if not i:
raise ValueError("no floor options less than or equal to: %r" % x)
return options[i - 1]
try:
_int_types = (int, long)
bytes = str
except NameError:
# py3 has no long
_int_types = (int,)
unicode = str
class Bits(object):
'''
An immutable bit-string or bit-array object.
Provides list-like access to bits as bools,
as well as bitwise masking and shifting operators.
Bits also make it easy to convert between many
different useful representations:
* bytes -- good for serializing raw binary data
* int -- good for incrementing (e.g. to try all possible values)
* list of bools -- good for iterating over or treating as flags
* hex/bin string -- good for human readability
'''
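    # Illustrative round trips (binary strings carry no prefix; hex uses '0x'):
    #   Bits('1010').as_hex() == '0A'
    #   Bits('0xa').as_bin() == '1010'
    #   Bits([True, False]).as_int() == 2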
__slots__ = ('val', 'len')
def __init__(self, val=0, len_=None):
if type(val) not in _int_types:
if type(val) is list:
val = ''.join(['1' if e else '0' for e in val])
if type(val) is bytes:
val = val.decode('ascii')
if type(val) is unicode:
if len_ is None:
len_ = len(val)
if val.startswith('0x'):
len_ = (len_ - 2) * 4
if val.startswith('0x'):
val = int(val, 16)
else:
if val:
val = int(val, 2)
else:
val = 0
if type(val) not in _int_types:
raise TypeError('initialized with bad type: {0}'.format(type(val).__name__))
if val < 0:
raise ValueError('Bits cannot represent negative values')
if len_ is None:
len_ = len('{0:b}'.format(val))
        if val >= 2 ** len_:
raise ValueError('value {0} cannot be represented with {1} bits'.format(val, len_))
self.val = val # data is stored internally as integer
self.len = len_
def __getitem__(self, k):
if type(k) is slice:
return Bits(self.as_bin()[k])
if type(k) is int:
if k >= self.len:
raise IndexError(k)
return bool((1 << (self.len - k - 1)) & self.val)
raise TypeError(type(k))
def __len__(self):
return self.len
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.val == other.val and self.len == other.len
def __or__(self, other):
if type(self) is not type(other):
return NotImplemented
return Bits(self.val | other.val, max(self.len, other.len))
def __and__(self, other):
if type(self) is not type(other):
return NotImplemented
return Bits(self.val & other.val, max(self.len, other.len))
def __lshift__(self, other):
return Bits(self.val << other, self.len + other)
def __rshift__(self, other):
return Bits(self.val >> other, self.len - other)
def __hash__(self):
return hash(self.val)
def as_list(self):
return [c == '1' for c in self.as_bin()]
def as_bin(self):
return '{{0:0{0}b}}'.format(self.len).format(self.val)
def as_hex(self):
# make template to pad out to number of bytes necessary to represent bits
tmpl = '%0{0}X'.format(2 * (self.len // 8 + ((self.len % 8) != 0)))
ret = tmpl % self.val
return ret
def as_int(self):
return self.val
def as_bytes(self):
return binascii.unhexlify(self.as_hex())
@classmethod
def from_list(cls, list_):
return cls(list_)
@classmethod
def from_bin(cls, bin):
return cls(bin)
@classmethod
def from_hex(cls, hex):
if isinstance(hex, bytes):
hex = hex.decode('ascii')
if not hex.startswith('0x'):
hex = '0x' + hex
return cls(hex)
@classmethod
def from_int(cls, int_, len_=None):
return cls(int_, len_)
@classmethod
def from_bytes(cls, bytes_):
return cls.from_hex(binascii.hexlify(bytes_))
def __repr__(self):
cn = self.__class__.__name__
return "{0}('{1}')".format(cn, self.as_bin())
# ---- greenplum-db/gpdb: /src/test/isolation/specs/fk-contention.spec ----
setup
{
CREATE TABLE foo (a int PRIMARY KEY, b text);
CREATE TABLE bar (a int NOT NULL REFERENCES foo);
INSERT INTO foo VALUES (42);
}
teardown
{
DROP TABLE foo, bar;
}
session s1
setup { BEGIN; }
step ins { INSERT INTO bar VALUES (42); }
step com { COMMIT; }
session s2
step upd { UPDATE foo SET b = 'Hello World'; }
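No explicit permutation is declared, so the isolation tester will exercise every interleaving of the steps above. Pinning a single schedule would look roughly like the line below (assumed syntax, matching the unquoted step names used here):

# e.g.: permutation ins upd com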
# ---- kleientertainment/ds_mod_tools: /pkg/win32/mod_tools/tools/scripts/objloader.py ----
# taken from the pygame wiki http://pygame.org/wiki/OBJFileLoader
class ObjFile:
    def __init__(self, file):
        """Loads a Wavefront OBJ file."""
        self.vertices = []
        self.normals = []
        self.texcoords = []
        self.faces = []
        material = None
        for line in file:
            if line.startswith('#'): continue
            values = line.split()
            if not values: continue
            if values[0] == 'v':
                # geometric vertex: x y z (wrap map() in list() for Python 3)
                v = list(map(float, values[1:4]))
                self.vertices.append(v)
            elif values[0] == 'vn':
                # vertex normal
                v = list(map(float, values[1:4]))
                self.normals.append(v)
            elif values[0] == 'vt':
                # texture coordinate: u v
                self.texcoords.append(list(map(float, values[1:3])))
            elif values[0] in ('usemtl', 'usemat'):
                material = values[1]
            elif values[0] == 'mtllib':
                pass
                #self.mtl = MTL(values[1])
            elif values[0] == 'f':
                # face: each element is "v", "v/vt" or "v/vt/vn" (1-based indices)
                verts = []
                for v in values[1:]:
                    vert = []
                    w = v.split('/')
                    vert.append(int(w[0]))
                    if len(w) >= 2 and len(w[1]) > 0:
                        vert.append(int(w[1]))
                    else:
                        vert.append(0)
                    if len(w) >= 3 and len(w[2]) > 0:
                        vert.append(int(w[2]))
                    else:
                        vert.append(0)
                    verts.append(vert)
                self.faces.append(verts)
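A minimal, hedged usage sketch (the path is a placeholder; the constructor only needs an iterable of lines):

with open('model.obj') as f:           # hypothetical path
    obj = ObjFile(f)
print(len(obj.vertices), 'vertices,', len(obj.faces), 'faces')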
# ---- auth0/auth0-python: /auth0/management/hooks.py ----
from __future__ import annotations
from typing import Any
from ..rest import RestClient, RestClientOptions
from ..types import TimeoutType
class Hooks:
"""Hooks endpoint implementation.
Args:
        domain (str): Your Auth0 domain, e.g.: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
protocol (str, optional): Protocol to use when making requests.
(defaults to "https")
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(
self,
domain: str,
token: str,
telemetry: bool = True,
timeout: TimeoutType = 5.0,
protocol: str = "https",
rest_options: RestClientOptions | None = None,
) -> None:
self.domain = domain
self.protocol = protocol
self.client = RestClient(
jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options
)
def _url(self, id: str | None = None) -> str:
url = f"{self.protocol}://{self.domain}/api/v2/hooks"
if id is not None:
return f"{url}/{id}"
return url
def all(
self,
enabled: bool = True,
fields: list[str] | None = None,
include_fields: bool = True,
page: int | None = None,
per_page: int | None = None,
include_totals: bool = False,
):
"""Retrieves a list of all hooks.
Args:
enabled (bool, optional): If provided, retrieves hooks that match
the value, otherwise all hooks are retrieved.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result, empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise
(defaults to true).
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Hooks/get_hooks
"""
params = {
"fields": fields and ",".join(fields) or None,
"include_fields": str(include_fields).lower(),
"page": page,
"per_page": per_page,
"include_totals": str(include_totals).lower(),
}
# since the default is True, this is here to disable the filter
if enabled is not None:
params["enabled"] = str(enabled).lower()
return self.client.get(self._url(), params=params)
def create(self, body: dict[str, Any]) -> dict[str, Any]:
"""Creates a new Hook.
Args:
body (dict): Attributes for the newly created hook,
See: https://auth0.com/docs/api/v2#!/Hooks/post_hooks
"""
return self.client.post(self._url(), data=body)
def get(self, id: str, fields: list[str] | None = None) -> dict[str, Any]:
"""Retrieves a hook by its ID.
Args:
id (str): The id of the hook to retrieve.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result, empty to
retrieve all fields.
See: https://auth0.com/docs/api/management/v2#!/Hooks/get_hooks_by_id
"""
params = {
"fields": fields and ",".join(fields) or None,
}
return self.client.get(self._url(id), params=params)
def delete(self, id: str) -> Any:
"""Deletes a hook.
Args:
id (str): The id of the hook to delete.
See: https://auth0.com/docs/api/management/v2#!/Hooks/delete_hooks_by_id
"""
return self.client.delete(self._url(id))
def update(self, id: str, body: dict[str, Any]) -> dict[str, Any]:
"""Updates an existing hook.
Args:
id (str): The id of the hook to modify.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/v2#!/Hooks/patch_hooks_by_id
"""
return self.client.patch(self._url(id), data=body)
def get_secrets(self, id: str) -> dict[str, Any]:
"""Retrieves a hook's secrets.
Args:
id (str): The id of the hook to retrieve secrets from.
See: https://auth0.com/docs/api/management/v2#!/Hooks/get_secrets
"""
return self.client.get(self._url("%s/secrets" % id))
def add_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:
"""Add one or more secrets for an existing hook.
Args:
id (str): The id of the hook to add secrets to.
body (dict): Dict of key-value pairs where the value must be a string.
See: https://auth0.com/docs/api/management/v2#!/Hooks/post_secrets
"""
return self.client.post(self._url("%s/secrets" % id), data=body)
def delete_secrets(self, id: str, body: list[str]) -> Any:
"""Delete one or more existing secrets for an existing hook.
Args:
id (str): The id of the hook to add secrets to.
body (list): List of secret names to delete.
See: https://auth0.com/docs/api/management/v2#!/Hooks/delete_secrets
"""
return self.client.delete(self._url("%s/secrets" % id), data=body)
def update_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:
"""Update one or more existing secrets for an existing hook.
Args:
id (str): The id of the hook to add secrets to.
body (dict): Dict of key-value pairs where the value must be a string.
See: https://auth0.com/docs/api/management/v2#!/Hooks/patch_secrets
"""
return self.client.patch(self._url("%s/secrets" % id), data=body)
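A hedged usage sketch; the domain and token are placeholders, and the create() body only illustrates the shape of a request (see the linked Management API docs for the authoritative fields):

hooks = Hooks('example.auth0.com', 'MGMT_API_V2_TOKEN')      # placeholders
page = hooks.all(enabled=True, per_page=10, include_totals=True)
hook = hooks.create({'name': 'my-hook',                      # illustrative body
                     'script': 'module.exports = function (user, context, cb) { cb(null, {}); };',
                     'triggerId': 'credentials-exchange'})
hooks.add_secrets(hook['id'], {'API_KEY': 'secret-value'})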
# ---- pulumi/pulumi-aws: /sdk/python/pulumi_aws/ssoadmin/outputs.py ----
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'CustomerManagedPolicyAttachmentCustomerManagedPolicyReference',
'InstanceAccessControlAttributesAttribute',
'InstanceAccessControlAttributesAttributeValue',
'PermissionsBoundaryAttachmentPermissionsBoundary',
'PermissionsBoundaryAttachmentPermissionsBoundaryCustomerManagedPolicyReference',
]
@pulumi.output_type
class CustomerManagedPolicyAttachmentCustomerManagedPolicyReference(dict):
def __init__(__self__, *,
name: str,
path: Optional[str] = None):
"""
:param str name: Name of the customer managed IAM Policy to be attached.
:param str path: The path to the IAM policy to be attached. The default is `/`. See [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) for more information.
"""
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the customer managed IAM Policy to be attached.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
The path to the IAM policy to be attached. The default is `/`. See [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) for more information.
"""
return pulumi.get(self, "path")
@pulumi.output_type
class InstanceAccessControlAttributesAttribute(dict):
def __init__(__self__, *,
key: str,
values: Sequence['outputs.InstanceAccessControlAttributesAttributeValue']):
"""
:param str key: The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source with an attribute in AWS SSO.
:param Sequence['InstanceAccessControlAttributesAttributeValueArgs'] values: The value used for mapping a specified attribute to an identity source. See AccessControlAttributeValue
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source with an attribute in AWS SSO.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def values(self) -> Sequence['outputs.InstanceAccessControlAttributesAttributeValue']:
"""
The value used for mapping a specified attribute to an identity source. See AccessControlAttributeValue
"""
return pulumi.get(self, "values")
@pulumi.output_type
class InstanceAccessControlAttributesAttributeValue(dict):
def __init__(__self__, *,
sources: Sequence[str]):
"""
:param Sequence[str] sources: The identity source to use when mapping a specified attribute to AWS SSO.
"""
pulumi.set(__self__, "sources", sources)
@property
@pulumi.getter
def sources(self) -> Sequence[str]:
"""
The identity source to use when mapping a specified attribute to AWS SSO.
"""
return pulumi.get(self, "sources")
@pulumi.output_type
class PermissionsBoundaryAttachmentPermissionsBoundary(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "customerManagedPolicyReference":
suggest = "customer_managed_policy_reference"
elif key == "managedPolicyArn":
suggest = "managed_policy_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsBoundaryAttachmentPermissionsBoundary. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsBoundaryAttachmentPermissionsBoundary.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsBoundaryAttachmentPermissionsBoundary.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
customer_managed_policy_reference: Optional['outputs.PermissionsBoundaryAttachmentPermissionsBoundaryCustomerManagedPolicyReference'] = None,
managed_policy_arn: Optional[str] = None):
"""
:param 'PermissionsBoundaryAttachmentPermissionsBoundaryCustomerManagedPolicyReferenceArgs' customer_managed_policy_reference: Specifies the name and path of a customer managed policy. See below.
:param str managed_policy_arn: AWS-managed IAM policy ARN to use as the permissions boundary.
"""
if customer_managed_policy_reference is not None:
pulumi.set(__self__, "customer_managed_policy_reference", customer_managed_policy_reference)
if managed_policy_arn is not None:
pulumi.set(__self__, "managed_policy_arn", managed_policy_arn)
@property
@pulumi.getter(name="customerManagedPolicyReference")
def customer_managed_policy_reference(self) -> Optional['outputs.PermissionsBoundaryAttachmentPermissionsBoundaryCustomerManagedPolicyReference']:
"""
Specifies the name and path of a customer managed policy. See below.
"""
return pulumi.get(self, "customer_managed_policy_reference")
@property
@pulumi.getter(name="managedPolicyArn")
def managed_policy_arn(self) -> Optional[str]:
"""
AWS-managed IAM policy ARN to use as the permissions boundary.
"""
return pulumi.get(self, "managed_policy_arn")
@pulumi.output_type
class PermissionsBoundaryAttachmentPermissionsBoundaryCustomerManagedPolicyReference(dict):
def __init__(__self__, *,
name: str,
path: Optional[str] = None):
"""
:param str name: Name of the customer managed IAM Policy to be attached.
:param str path: The path to the IAM policy to be attached. The default is `/`. See [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) for more information.
"""
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the customer managed IAM Policy to be attached.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
The path to the IAM policy to be attached. The default is `/`. See [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) for more information.
"""
return pulumi.get(self, "path")
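These output types are normally materialized by the Pulumi engine from provider state rather than built by hand; the sketch below constructs one directly, purely for illustration:

ref = CustomerManagedPolicyAttachmentCustomerManagedPolicyReference(name='my-policy', path='/')
assert ref.name == 'my-policy' and ref.path == '/'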
# ---- utiasDSL/safe-control-gym: /safe_control_gym/envs/env_wrappers/vectorized_env/dummy_vec_env.py ----
'''Dummy Vectorized Environments.'''
import copy
import numpy as np
from safe_control_gym.envs.env_wrappers.vectorized_env.vec_env import VecEnv
from safe_control_gym.envs.env_wrappers.vectorized_env.vec_env_utils import _flatten_obs
from safe_control_gym.utils.utils import get_random_state, set_random_state
class DummyVecEnv(VecEnv):
    '''Single-threaded vectorized env: steps multiple sub-envs sequentially in one process.'''
def __init__(self,
env_fns
):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
self.actions = None
self.closed = False
def step_async(self,
actions
):
self.actions = actions
def step_wait(self):
results = []
for i in range(self.num_envs):
obs, rew, done, info = self.envs[i].step(self.actions[i])
if done:
end_obs = copy.deepcopy(obs)
end_info = copy.deepcopy(info)
obs, info = self.envs[i].reset()
info['terminal_observation'] = end_obs
info['terminal_info'] = end_info
results.append([obs, rew, done, info])
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.array(rews), np.array(dones), {'n': infos}
def reset(self):
results = []
for env in self.envs:
results.append(env.reset())
obs, infos = zip(*results)
return _flatten_obs(obs), {'n': infos}
def close(self):
for env in self.envs:
env.close()
if self.viewer is not None:
self.viewer.close()
self.closed = True
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self,
mode='human'
):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
def get_env_random_state(self):
return [get_random_state()]
def set_env_random_state(self,
worker_random_states
):
set_random_state(worker_random_states[0])
def get_attr(self,
attr_name,
indices=None
):
'''Return attribute from vectorized environment (see base class).'''
target_envs = self._get_target_envs(indices)
return [getattr(env_i, attr_name) for env_i in target_envs]
def set_attr(self,
attr_name,
values,
indices=None
):
'''Set attribute inside vectorized environments (see base class).'''
target_envs = self._get_target_envs(indices)
assert len(target_envs) == len(values)
for env_i, val_i in zip(target_envs, values):
setattr(env_i, attr_name, val_i)
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
'''Call instance methods of vectorized environments.'''
target_envs = self._get_target_envs(indices)
if method_args is None:
method_args = [[]] * len(target_envs)
if method_kwargs is None:
method_kwargs = [{}] * len(target_envs)
assert len(target_envs) == len(method_args) and len(target_envs) == len(
method_kwargs)
return [
getattr(env_i, method_name)(*args, **kwargs) for env_i, args, kwargs
in zip(target_envs, method_args, method_kwargs)
]
def _get_target_envs(self,
indices
):
assert indices is None or sorted(
indices) == indices, 'Indices must be ordered'
indices = self._get_indices(indices)
return [self.envs[i] for i in indices]
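A hedged usage sketch; make_env is a placeholder factory returning an env whose reset() yields an (obs, info) pair, which is the contract step_wait() assumes above:

venv = DummyVecEnv([make_env, make_env])    # make_env: hypothetical factory
obs, infos = venv.reset()
venv.step_async(actions)                    # actions: one action per sub-env (hypothetical)
obs, rews, dones, infos = venv.step_wait()
venv.close()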
# ---- vinayak-mehta/nbcommands: /setup.py ----
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, "nbcommands", "__version__.py"), "r") as f:
exec(f.read(), about)
with open("README.md", "r") as f:
readme = f.read()
requires = [
"black>=19.10b0",
"Click>=7.0",
"colorama>=0.4.1",
"nbformat>=4.4.0",
"Pygments>=2.4.2",
]
dev_requires = ["Sphinx>=2.2.1"]
dev_requires = dev_requires + requires
def setup_package():
metadata = dict(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=readme,
long_description_content_type="text/markdown",
url=about["__url__"],
author=about["__author__"],
author_email=about["__author_email__"],
license=about["__license__"],
packages=find_packages(exclude=("tests",)),
install_requires=requires,
extras_require={"dev": dev_requires},
entry_points={
"console_scripts": [
"nbblack = nbcommands._black:_black",
"nbtouch = nbcommands._touch:touch",
"nbgrep = nbcommands._grep:grep",
"nbhead = nbcommands._head:head",
"nbtail = nbcommands._tail:tail",
"nbcat = nbcommands._cat:cat",
"nbless = nbcommands._less:less",
]
},
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(**metadata)
if __name__ == "__main__":
setup_package()
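With the console_scripts entry points above, a development install that also pulls the docs extras would typically be:

pip install -e ".[dev]"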
# ---- NVIDIA/TensorRT: /tools/pytorch-quantization/tests/quant_instancenorm_test.py ----
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantInstanceNorm module.
Mose tests check the functionality of all the combinations in Quant instancenorm against the corresponding functionalities in
tensor_quant. There are tests for all the three QuantInstaceNorm1D, QuantInstanceNorm2D, and QuantInstanceNorm3D
"""
import pytest
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from pytorch_quantization import tensor_quant
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
from pytorch_quantization import utils as quant_utils
from pytorch_quantization.nn.modules import quant_instancenorm
#import tests.utils as test_utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.backends.cudnn.deterministic = True
np.random.seed(1234)
# pylint:disable=missing-docstring, no-self-use
NUM_CHANNELS = 15
class TestQuantInstanceNorm1D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantInstanceNorm2D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0].max(3, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantInstanceNorm3D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0]
.max(3, keepdim=True)[0].max(4, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
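The per-channel tests reduce |x| over every axis except the channel axis (1) by chaining max(dim, keepdim=True) calls; on PyTorch 1.7+ the same reduction collapses to a single amax call (sketch for the 2D case):

amax = test_input.abs().amax(dim=(0, 2, 3), keepdim=True)   # shape (1, NUM_CHANNELS, 1, 1)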
# ---- andrersp/controleEstoque: /controle_estoque/mainclientes.py ----
# -*- coding: utf-8 -*-
import re
from functools import partial
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
from Views.mainClientes import Ui_ct_MainClientes
from Views.formClientes import Ui_ct_FormClientes
from Crud.CrudCliente import CrudCliente
from Crud.CrudVenda import CrudVenda
class MainClientes(Ui_ct_MainClientes, Ui_ct_FormClientes):
def mainclientes(self, frame):
super(MainClientes, self).setMainClientes(frame)
self.frameMainClientes.show()
        # Button icons
self.IconeBotaoMenu(self.bt_BuscaClientes,
self.resourcepath('Images/search.png'))
self.IconeBotaoMenu(self.bt_PrintRelatCliente,
self.resourcepath('Images/gtk-print.png'))
self.IconeBotaoForm(self.bt_AddNovoClientes,
self.resourcepath('Images/addCliente.svg'))
        # "Add client" button / FormClientes
self.bt_AddNovoClientes.clicked.connect(self.FormClientes)
        # Table column widths
self.tb_Clientes.blockSignals(True)
self.tb_Clientes.setColumnHidden(0, True)
self.tb_Clientes.setColumnWidth(1, 40)
self.tb_Clientes.setColumnWidth(2, 350)
self.tb_Clientes.setColumnWidth(3, 235)
self.tb_Clientes.setColumnWidth(4, 265)
self.tb_Clientes.setColumnWidth(5, 40)
        # Populate the table
self.TabelaClientes()
        # Search clients by name
self.tx_BuscaClientes.textEdited.connect(self.TabelaClientes)
        # Print button
self.bt_PrintRelatCliente.clicked.connect(self.imprimirCliente)
    # Table data
def TabelaClientes(self):
lista = CrudCliente()
lista.nome = self.tx_BuscaClientes.text()
lista.listaCliente()
i = 0
while self.tb_Clientes.rowCount() > 0:
self.tb_Clientes.removeRow(0)
if len(lista.nome) >= 1:
while i < len(lista.nome):
self.tb_Clientes.insertRow(i)
self.TabelaStatus(self.tb_Clientes, i, 0,
self.StatusEntrega(1))
self.TabelaID(self.tb_Clientes, i, 1, lista.id[i])
self.TabelaNomeTelefone(self.tb_Clientes, i, 2,
lista.nome[i],
lista.sobrenome[i])
self.TabelaNomeTelefone(self.tb_Clientes, i, 3,
self.formatoNumTelefone(
lista.celular[i]),
self.formatoNumTelefone(
lista.telefone[i]))
self.TabelaNomeTelefone(self.tb_Clientes, i, 4,
lista.email[i], "")
                # Table click signal
self.botaoTabela(self.tb_Clientes, i, 5, partial(
self.SelectCliente, lista.id[i]), "#005099")
i += 1
    # Select client by ID
def SelectCliente(self, valor):
id = valor
self.FormClientes()
self.tx_Id.setText(str(id))
busca = CrudCliente()
busca.id = self.tx_Id.text()
busca.selectClienteId()
self.tx_NomeFantasia.setText(busca.nome)
self.tx_RazaoSocial.setText(busca.sobrenome)
self.tx_cnpj.setText(busca.cpf)
self.tx_InscEstadual.setText(busca.rg)
self.tx_Celular.setText(busca.celular)
self.tx_Telefone.setText(busca.telefone)
self.tx_Email.setText(busca.email)
self.tx_Obs.setText(busca.obs)
self.tx_Cep.setText(busca.cep)
self.tx_Endereco.setText(busca.endereco)
self.tx_Numero.setText(busca.numero)
self.tx_Bairro.setText(busca.bairro)
self.tx_Cidade.setText(busca.cidade)
self.tx_Estado.setText(busca.estado)
        # Clear the purchase-history table; remove row 0 until empty
        # (removing by ascending index skips rows as the table shrinks)
        while self.tb_Historico.rowCount() > 0:
            self.tb_Historico.removeRow(0)
        # Client purchase history
total = '0.00'
lista = CrudVenda()
lista.idCliente = valor
lista.selectVendaCliente()
i = 0
while i < len(lista.dataEmissao):
# print row
self.tb_Historico.insertRow(i)
self.conteudoTabela(
self.tb_Historico, i, 0, str(lista.dataEmissao[i]))
self.conteudoTabela(
self.tb_Historico, i, 1, str(lista.dataEntrega[i]))
self.conteudoTabela(
self.tb_Historico, i, 2, str(lista.valorTotal[i]))
total = float(lista.valorTotal[i]) + float(total)
i += 1
self.lb_TotalHistorico.setText(format(float(total), ".2f"))
    # Client form frame
def FormClientes(self):
# self.DesativaBotaoProdutos()
self.LimpaFrame(self.ct_containerClientes)
super(MainClientes, self).setFormClientes(self.ct_containerClientes)
self.fr_FormClientes.show()
        # Button icons
self.IconeBotaoMenu(self.bt_Salvar,
self.resourcepath('Images/salvar.png'))
self.IconeBotaoMenu(self.bt_Voltar,
self.resourcepath('Images/cancelar.png'))
self.IconeBotaoMenu(self.bt_BuscaCep,
self.resourcepath('Images/find.png'))
        # Check whether a valid ID exists
self.IdCheckCliente()
        # History table column widths
self.tb_Historico.setColumnWidth(0, 100)
self.tb_Historico.setColumnWidth(1, 100)
self.tb_Historico.setColumnWidth(2, 100)
self.tb_Historico.setColumnHidden(3, True)
        # Back button
self.bt_Voltar.clicked.connect(self.janelaClientes)
        # Save button
self.bt_Salvar.clicked.connect(self.VerificaInputClientes)
        # Postal code (CEP) lookup
self.bt_BuscaCep.clicked.connect(self.buscarCepCliente)
self.tx_Cep.returnPressed.connect(self.buscarCepCliente)
    # Check the Id field: editing an existing client vs. creating a new one
def IdCheckCliente(self):
if not self.tx_Id.text():
busca = CrudCliente()
self.tx_Id.setText(str(busca.lastIdCliente()))
    # Validate inputs
def VerificaInputClientes(self):
if not self.tx_NomeFantasia.text():
self.tx_NomeFantasia.setFocus()
elif not self.tx_Celular.text():
self.tx_Celular.setFocus()
else:
self.CadCliente()
def CadCliente(self):
INSERI = CrudCliente()
INSERI.id = self.tx_Id.text()
INSERI.nome = self.tx_NomeFantasia.text().upper()
INSERI.sobrenome = self.tx_RazaoSocial.text().upper()
        # keep digits only; the original pattern '[^[0-9]' also excluded '[',
        # which was almost certainly unintended
        INSERI.cpf = re.sub(r'[^0-9]', '', self.tx_cnpj.text())
        INSERI.rg = re.sub(r'[^0-9]', '', self.tx_InscEstadual.text())
        INSERI.celular = re.sub(r'[^0-9]', '', self.tx_Celular.text())
        INSERI.telefone = re.sub(r'[^0-9]', '', self.tx_Telefone.text())
        INSERI.email = self.tx_Email.text()
        INSERI.obs = self.tx_Obs.text().upper()
        INSERI.cep = re.sub(r'[^0-9]', '', self.tx_Cep.text())
INSERI.endereco = self.tx_Endereco.text().upper()
INSERI.numero = self.tx_Numero.text()
INSERI.bairro = self.tx_Bairro.text().upper()
INSERI.cidade = self.tx_Cidade.text().upper()
INSERI.estado = self.tx_Estado.text().upper()
INSERI.inseriCliente()
self.janelaClientes()
    # Printing
def imprimirCliente(self):
self.documento = QWebEngineView()
headertable = ["Cod", "Nome ", "Telefone", "Email"]
cod = []
nome = []
telefone = []
email = []
        # iterate by row index; the original loop read the widgets of row i == 0 every time
        for row in range(self.tb_Clientes.rowCount()):
            cod.append(self.tb_Clientes.cellWidget(row, 1).text())
            nome.append(self.tb_Clientes.cellWidget(row, 2).text())
            telefone.append(self.tb_Clientes.cellWidget(row, 3).text())
            email.append(self.tb_Clientes.cellWidget(row, 4).text())
self.renderTemplate(
"clientes.html",
estilo=self.resourcepath('Template/estilo.css'),
titulo="LISTAGEM CLIENTES",
headertable=headertable,
codcliente=cod,
nome=nome,
telefoneFornecedor=telefone,
emailFornecedor=email
)
self.documento.load(QUrl.fromLocalFile(
self.resourcepath("report.html")))
self.documento.loadFinished['bool'].connect(self.previaImpressao)
# ---- antoinecarme/pyaf: /pyaf/TS/Temporal_Hierarchy.py ----
# Copyright (C) 2016 Antoine Carme <Antoine.Carme@outlook.com>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import DateTime_Functions as dtfunc
from . import SignalHierarchy as sighier
from . import Utils as tsutil
class cTemporalHierarchy (sighier.cSignalHierarchy):
def __init__(self):
sighier.cSignalHierarchy.__init__(self)
self.mHorizons = {}
def discard_nans_in_aggregate_signals(self):
return True
def get_specific_date_column_for_signal(self, level, signal):
# only for temporal hierarchies
lPeriod = self.mPeriods[level]
lPrefix = "TH"
lName = lPrefix + "_" + lPeriod + "_start"
return lName
def get_beginning_of_period(self, x, iPeriod):
# add this utility function.
lHelper = dtfunc.cDateTime_Helper()
return lHelper.get_beginning_of_period(iPeriod, x)
def aggregate_time_columns(self, level, signal, iAllLevelsDataset):
cols = [col1 for col1 in sorted(self.mStructure[level][signal])]
iAllLevelsDataset[signal] = iAllLevelsDataset[cols[0]]
for col in cols[1:]:
            # row-wise coalesce ("logical or"): take col where signal is None;
            # axis=1 passes one row at a time (axis=0 would pass whole columns)
            new_col = iAllLevelsDataset[[signal, col]].apply(lambda x: x.iloc[1] if (x.iloc[0] is None) else x.iloc[0], axis=1)
iAllLevelsDataset[signal] = new_col
def create_all_levels_dataset(self, df):
df = df.reset_index(drop = True)
df = self.add_temporal_data(df)
return df
def get_horizon(self, level, signal):
# only for temporal hierarchies
lPeriod = self.mPeriods[level]
return self.mHorizons[lPeriod]
def add_temporal_data(self, df):
logger = tsutil.get_pyaf_hierarchical_logger();
N = len(df.columns)
df1 = df[[self.mDateColumn, self.mSignal]].copy()
df1.set_index(self.mDateColumn, inplace=True, drop=False)
# df1.info()
lPrefix = "TH"
df_resampled = {}
for lPeriod in self.mPeriods:
lName = lPrefix + "_" + lPeriod + "_start"
df_resampled[lPeriod] = df1[self.mSignal].resample(lPeriod).sum().reset_index()
df_resampled[lPeriod].columns = [lName , self.mSignal]
# synchronize
lShift = df_resampled[lPeriod][lName].iloc[0] - df[self.mDateColumn].iloc[0]
df_resampled[lPeriod][lName] = df_resampled[lPeriod][lName] - lShift
for lPeriod in self.mPeriods:
lName = lPrefix + "_" + lPeriod + "_start"
WData = df_resampled[lPeriod]
# df[[self.mDateColumn , self.mSignal]].info()
# WData.info()
# tsutil.print_pyaf_detailed_info("DATE_PERIOD", list(WData[lName])[:30])
df_merge = df[[self.mDateColumn , self.mSignal]].merge(WData, left_on=self.mDateColumn,right_on=lName, how='left', suffixes=('_x', '_Period'), sort=True)
df[self.mSignal + '_' + lPeriod] = df_merge[self.mSignal + '_Period']
df[lName] = df_merge[lName]
logger.info("FORECASTING_HIERARCHICAL_TEMPORAL_LEVEL " + str((lPeriod, lName, list(df.columns), WData.shape)))
return df
def compute_horizons(self, df):
df = df.reset_index(drop = True)
logger = tsutil.get_pyaf_hierarchical_logger();
N = len(df.columns)
df1 = df[[self.mDateColumn, self.mSignal]].copy()
df1.set_index(self.mDateColumn, inplace=True, drop=False)
# df1.info()
lPrefix = "TH"
lHelper = dtfunc.cDateTime_Helper()
lBaseFreq = lHelper.computeTimeFrequency_in_seconds(df1[self.mDateColumn])
df_resampled = {}
for lPeriod in self.mPeriods:
lName = lPrefix + "_" + lPeriod + "_start"
df_resampled[lPeriod] = df1[self.mSignal].resample(lPeriod).sum().reset_index()
df_resampled[lPeriod].columns = [lName , self.mSignal]
# synchronize
lShift = df_resampled[lPeriod][lName].iloc[0] - df[self.mDateColumn].iloc[0]
df_resampled[lPeriod][lName] = df_resampled[lPeriod][lName] - lShift
lDate_Period = df_resampled[lPeriod][lName]
# tsutil.print_pyaf_detailed_info("AS_FREQ" , lPeriod , lDate_Period.head())
lNewFreq = lHelper.computeTimeFrequency_in_seconds(lDate_Period)
lHorizon = int(self.mHorizon * lBaseFreq / lNewFreq)
lHorizon = max(1, lHorizon)
# tsutil.print_pyaf_detailed_info("AS_FREQ_2" , lPeriod , lBaseFreq , lNewFreq , lHorizon)
self.mHorizons[lPeriod] = lHorizon
logger.info("FORECASTING_HIERARCHICAL_TEMPORAL_HORIZONS_FIRST_RESAMPLED_DATA " + str(lPeriod) + " " + str(df_resampled[lPeriod].head(5).to_dict()) )
logger.info("FORECASTING_HIERARCHICAL_TEMPORAL_HORIZONS " + str(self.mHorizons));
def checkPhysicalTime(self):
logger = tsutil.get_pyaf_hierarchical_logger();
lHelper = dtfunc.cDateTime_Helper()
lIsPhysical = lHelper.isPhysicalTime(self.mTrainingDataset[self.mDateColumn])
if(not lIsPhysical):
raise tsutil.PyAF_Error('TIME_HIERARCHY_PHYSICAL_TIME_NEEDED ' + str(self.mDateColumn) + " " + str(self.mTrainingDataset[self.mDateColumn].dtype))
def check_increasing_periods(self):
logger = tsutil.get_pyaf_hierarchical_logger();
lHelper = dtfunc.cDateTime_Helper()
lSeconds = {}
for lPeriod in self.mPeriods:
lSeconds[lPeriod] = lHelper.get_period_length_in_seconds(lPeriod)
logger.info("FORECASTING_HIERARCHICAL_TEMPORAL_FREQUENCIES " + str(lSeconds) )
lPreviousPeriod = lSeconds[ self.mPeriods[0] ]
lTimeFreqInSeconds = lHelper.computeTimeFrequency_in_seconds(self.mTrainingDataset[self.mDateColumn])
if(lTimeFreqInSeconds > lPreviousPeriod):
raise tsutil.PyAF_Error('TIME_HIERARCHY_PHYSICAL_TIME_RESOLUTION_TOO_LOW_FOR_THIS_PERIOD ' + str(self.mDateColumn) + " " + str(lTimeFreqInSeconds) +
" " + self.mPeriods[0] + " " + str(lPreviousPeriod))
for lPeriod in self.mPeriods[1:]:
if(lSeconds[lPeriod] < lPreviousPeriod):
raise tsutil.PyAF_Error('TIME_HIERARCHY_NOT_MONOTONOUS ' + str(self.mPeriods));
lPreviousPeriod = lSeconds[lPeriod]
def create_HierarchicalStructure(self):
self.mPeriods = self.mHierarchy['Periods']
self.checkPhysicalTime()
self.check_increasing_periods()
self.compute_horizons(self.mTrainingDataset)
# self.add_temporal_data(self.mTrainingDataset)
self.mLevels = list(range(len(self.mPeriods)));
self.mStructure = {};
for (lLevel, lPeriod) in enumerate(self.mPeriods):
self.mStructure[lLevel] = {}
for (lLevel, lPeriod) in enumerate(self.mPeriods):
self.mStructure[lLevel][self.mSignal + '_' + lPeriod] = set()
if(lLevel > 0):
self.mStructure[lLevel][self.mSignal + '_' + lPeriod] = set([self.mSignal + '_' + self.mPeriods[lLevel - 1]])
# tsutil.print_pyaf_detailed_info(self.mStructure);
pass
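A hedged sketch of the hierarchy specification this class consumes; the period codes are pandas resample offset aliases and must be ordered from finest to coarsest (check_increasing_periods enforces this):

lHierarchy = {'Periods': ['D', 'W', 'M']}   # illustrative: daily -> weekly -> monthly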
# ---- serge-sans-paille/pythran: /pythran/tests/test_nogil.py ----
from pythran.tests import TestEnv
from pythran.typing import List, Set, Dict, NDArray
import numpy as np
class TestNoGil(TestEnv):
def test_list_param(self):
code="""
def list_param(l):
return l, sum(i*j for i in l for j in l)
"""
self.run_test(code, list(range(30)), list_param=[List[int]],
thread_count=4)
def test_set_param(self):
code="""
def set_param(l):
return {sum(l), sum(i*j for i in l for j in l)}, l
"""
self.run_test(code, set(range(30)), set_param=[Set[int]],
thread_count=4)
def test_dict_param(self):
code="""
def dict_param(l):
return {sum(i-j for i in l.keys() for j in l.values()): l}, l
"""
self.run_test(code, dict(zip(range(30), range(30))),
dict_param=[Dict[int, int]],
thread_count=4)
def test_ndarray_param(self):
code="""
import numpy as np
def ndarray_param(l):
return np.array([i*j for i in l for j in l]), l
"""
self.run_test(code, np.ones(100, dtype=int),
ndarray_param=[NDArray[int, :]],
thread_count=4)
# ---- galaxyproject/galaxy: /test/unit/tool_util/data/test_tool_data.py ----
LOC_ALPHA_CONTENTS_V2 = """
data1 data1name ${__HERE__}/data1/entry.txt
data2 data2name ${__HERE__}/data2/entry.txt
data3 data3name ${__HERE__}/data3/entry.txt
"""
def test_data_tables_as_dictionary(tdt_manager):
assert "testalpha" in tdt_manager.data_tables
assert "testdelta" not in tdt_manager.data_tables
def test_to_dict(tdt_manager):
as_dict = tdt_manager.to_dict()
assert "testalpha" in as_dict
assert "testdelta" not in as_dict
testalpha_as_dict = as_dict["testalpha"]
assert "columns" in testalpha_as_dict
def test_index(tdt_manager):
index = tdt_manager.index()
assert len(index.__root__) >= 1
entry = index.find_entry("testalpha")
assert entry
entry = index.find_entry("testomega")
assert not entry
def test_reload(tdt_manager, tmp_path):
assert len(tdt_manager["testalpha"].data) == 2
loc1 = tmp_path / "testalpha.loc"
loc1.write_text(LOC_ALPHA_CONTENTS_V2)
tdt_manager.reload_tables()
assert len(tdt_manager["testalpha"].data) == 3
def test_reload_by_path(tdt_manager, tmp_path):
assert len(tdt_manager["testalpha"].data) == 2
loc1 = tmp_path / "testalpha.loc"
loc1.write_text(LOC_ALPHA_CONTENTS_V2)
tdt_manager.reload_tables(path=str(loc1))
assert len(tdt_manager["testalpha"].data) == 3
def test_reload_by_name(tdt_manager, tmp_path):
assert len(tdt_manager["testalpha"].data) == 2
loc1 = tmp_path / "testalpha.loc"
loc1.write_text(LOC_ALPHA_CONTENTS_V2)
tdt_manager.reload_tables("testalpha")
assert len(tdt_manager["testalpha"].data) == 3
def test_merging_tables(merged_tdt_manager):
assert len(merged_tdt_manager["testbeta"].data) == 2
def test_to_json(merged_tdt_manager, tmp_path):
json_path = tmp_path / "as_json.json"
assert not json_path.exists()
merged_tdt_manager.to_json(json_path)
assert json_path.exists()
# ---- Droidtown/ArticutAPI: /ArticutAPI/Toolkit/graphQL.py ----
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from graphene.types.resolver import dict_resolver
import graphene
import sys
import os
import json
import re
from pprint import pprint
# Path to the Articut result file
resultFilePath = ''
"""
Articut GraphQL Schema
"""
class Persons(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
class Nouns(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
class Numbers(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
class Sites(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
class Userdefined(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
class Meta(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
lang = graphene.String()
description = graphene.String()
class Tokens(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
pos_ = graphene.String()
tag_ = graphene.String()
isStop = graphene.Boolean()
isEntity = graphene.Boolean()
isVerb = graphene.Boolean()
isTime = graphene.Boolean()
isClause = graphene.Boolean()
isKnowledge = graphene.Boolean()
class Ents(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
persons = graphene.List(Persons)
nouns = graphene.List(Nouns)
numbers = graphene.List(Numbers)
sites = graphene.List(Sites)
#userdefined = graphene.List(Userdefined)
class Doc(graphene.ObjectType):
class Meta:
default_resolver = dict_resolver
text = graphene.String()
tokens = graphene.List(Tokens)
ents = graphene.Field(Ents)
class Nlp(graphene.ObjectType):
meta = graphene.Field(Meta)
doc = graphene.Field(Doc)
class Query(graphene.ObjectType):
nlp = graphene.Field(
Nlp,
filepath = graphene.String(),
model = graphene.String()
)
def resolve_nlp(self, info, filepath, model):
if model != "TW":
return Nlp(
meta = {
"lang": model,
"description": 'Articut-GraphQL Model Unsupported.'
}
)
if filepath[-5:] == '.json':
try:
with open(filepath, 'r', encoding='utf-8') as resultFile:
result = json.loads(resultFile.read())
textTagLIST = posList2TextTag(result["result_pos"])
return Nlp(
meta = {
"lang": model,
"description": 'Articut GraphQL Query Result.'
},
doc = {
"text": result["result_segmentation"].replace('/', ''),
"tokens": getTokens(textTagLIST),
"ents": getEnts(textTagLIST)
}
)
except Exception as e:
print('[Articut-GraphQL ERROR] {}'.format(e))
return Nlp(
meta = {
"lang": model,
"description": 'Articut-GraphQL Error.'
}
)
"""
Used by ArticutAPI.py
"""
class GraphQL():
def query(self, filePath, query="""
{
meta {
lang
description
}
doc {
text
tokens {
text
pos_
tag_
isStop
isEntity
isVerb
isTime
isClause
isKnowledge
}
ents {
persons {
text
pos_
tag_
}
nouns {
text
pos_
tag_
}
numbers {
text
pos_
tag_
}
sites {
text
pos_
tag_
}
}
}
}"""):
query = """{\n nlp(filepath: "{{filePath}}", model: "TW") {{query}}\n}""".replace('{{filePath}}', filePath).replace('{{query}}', query)
result = graphene.Schema(query=Query).execute(query)
return json.loads(json.dumps({"data": result.data}))
"""
Split result_pos into [{"text", "tag_", "pos_"} ...] records
"""
def posList2TextTag(posLIST):
textTagLIST = []
textPosPat = re.compile("<[^>]*?>.*?</[^>]*?>")
posPat = re.compile("(?<=>).*?</[^>]*?>")
posLIST.reverse()
for pos in posLIST:
if pos[0] == '<' and pos[-1] == '>':
textPosLIST = [p.group(0) for p in reversed(list(textPosPat.finditer(pos)))]
for t in textPosLIST:
textLIST = [tp.group(0).split("</") for tp in posPat.finditer(t)]
textTagLIST.append({
"text": textLIST[0][0],
"tag_": textLIST[0][1][:-1],
"pos_": pos2UniversalPOS(textLIST[0][1][:-1])
})
else:
textTagLIST.append({
"text": pos,
"tag_": 'PUNCTUATION',
"pos_": 'PUNCT'
})
textTagLIST.reverse()
return textTagLIST
"""
Convert Articut POS tags to Universal Part-of-Speech tags
"""
def pos2UniversalPOS(pos):
if pos in ['FUNC_inner']:
return 'ADP'
if pos in ['FUNC_determiner']:
return 'DET'
if pos in ['AUX', 'MODAL']:
return 'AUX'
if pos in ['ASPECT', 'FUNC_negation']:
return 'PART'
if pos in ['FUNC_inter', 'FUNC_conjunction']:
return 'CONJ'
if pos in ['ENTITY_person', 'ENTITY_pronoun']:
return 'PERSON'
if pos in ['TIME_justtime', 'RANGE_period']:
return 'TIME'
if pos in ['QUANTIFIER', 'ENTITY_measurement']:
return 'QUANTITY'
if pos in ['MODIFIER', 'MODIFIER_color', 'FUNC_modifierHead']:
return 'ADJ'
if pos in ['LOCATION', 'RANGE_locality', 'KNOWLEDGE_place', 'KNOWLEDGE_addTW', 'KNOWLEDGE_route']:
return 'LOC'
if pos in ['VerbP', 'ACTION_verb', 'ACTION_lightVerb', 'ACTION_quantifiedVerb']:
return 'VERB'
if pos in ['TIME_day', 'TIME_week', 'TIME_month', 'TIME_season', 'TIME_year', 'TIME_decade', 'TIME_holiday']:
return 'DATE'
if pos in ['IDIOM', 'ENTITY_noun', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP', 'ENTITY_nounHead', 'ENTITY_num', 'ENTITY_classifier', 'ENTITY_possessive']:
return 'NOUN'
return 'OTHER' # ['UserDefined', 'CLAUSE_AnotAQ', 'CLAUSE_YesNoQ', 'CLAUSE_WhoQ', 'CLAUSE_WhatQ', 'CLAUSE_WhereQ', 'CLAUSE_WhenQ', 'CLAUSE_HowQ', 'CLAUSE_WhyQ', 'CLAUSE_Particle', 'KNOWLEDGE_url']
"""
Articut-GraphQL Content
"""
def getTokens(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
resultDICT = {
"text": textTag["text"],
"tag_": textTag["tag_"],
"pos_": textTag["pos_"],
"isStop": posIsStop(textTag["tag_"]),
"isEntity": posIsEntity(textTag["tag_"]),
"isVerb": posIsVerb(textTag["tag_"]),
"isTime": posIsTime(textTag["tag_"]),
"isClause": posIsClause(textTag["tag_"]),
"isKnowledge": posIsKnowledge(textTag["tag_"])
}
resultLIST.append(resultDICT)
return resultLIST
def getEnts(textTagLIST):
resultDICT = {
"persons": getPersons(textTagLIST),
"nouns": getNouns(textTagLIST),
"numbers": getNumbers(textTagLIST),
"sites": getSites(textTagLIST)
#"userdefined": getUserdefined(textTagLIST)
}
return resultDICT
"""
Articut-GraphQL Function
"""
def posIsStop(pos):
if pos in ['ACTION_lightVerb', 'FUNC_determiner', 'FUNC_modifierHead', 'FUNC_negation', 'FUNC_conjunction', 'RANGE_locality', 'RANGE_period']:
return True
return False
def posIsEntity(pos):
if pos in ['ENTITY_num', 'ENTITY_classifier', 'ENTITY_measurement', 'ENTITY_person', 'ENTITY_pronoun', 'ENTITY_possessive', 'ENTITY_noun', 'ENTITY_nounHead', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP']:
return True
return False
def posIsVerb(pos):
if pos in ['ACTION_verb', 'ACTION_quantifiedVerb', 'VerbP']:
return True
return False
def posIsTime(pos):
if pos in ['TIME_justtime', 'TIME_holiday', 'TIME_day', 'TIME_week', 'TIME_month', 'TIME_season', 'TIME_year', 'TIME_decade']:
return True
return False
def posIsClause(pos):
if pos in ['CLAUSE_AnotAQ', 'CLAUSE_YesNoQ', 'CLAUSE_WhoQ', 'CLAUSE_WhatQ', 'CLAUSE_WhereQ', 'CLAUSE_WhenQ', 'CLAUSE_HowQ', 'CLAUSE_WhyQ', 'CLAUSE_Particle']:
return True
return False
def posIsKnowledge(pos):
if pos in ['KNOWLEDGE_addTW', 'KNOWLEDGE_url', 'KNOWLEDGE_place', 'KNOWLEDGE_route', 'LOCATION', 'UserDefined']:
return True
return False
def getPersons(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
if textTag["tag_"] in ['ENTITY_person', 'ENTITY_pronoun']:
resultLIST.append(textTag)
return resultLIST
def getNouns(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
if textTag["tag_"] in ['ENTITY_noun', 'ENTITY_nounHead', 'ENTITY_nouny', 'ENTITY_oov', 'ENTITY_NP']:
resultLIST.append(textTag)
return resultLIST
def getNumbers(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
if textTag["tag_"] in ['ENTITY_num', 'ENTITY_classifier', 'ENTITY_measurement']:
resultLIST.append(textTag)
return resultLIST
def getSites(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
if textTag["tag_"] in ['KNOWLEDGE_addTW', 'KNOWLEDGE_place', 'LOCATION', 'KNOWLEDGE_route']:
resultLIST.append(textTag)
return resultLIST
# Uncompleted
def getUserdefined(textTagLIST):
resultLIST = []
for textTag in textTagLIST:
if textTag["tag_"] in ['']:
resultLIST.append(textTag)
return resultLIST
"""
Used by python ArticutGraphQL.py articutResult.json
Starlette server (http://0.0.0.0:8000)
"""
def serverStart():
from starlette.applications import Starlette
from starlette.routing import Router
from starlette.routing import Route
import uvicorn
app = Router([Route('/', endpoint=graphQL, methods=['GET', 'POST'])])
uvicorn.run(app, host='0.0.0.0', port=8000)
return None
async def graphQL(request):
from starlette.templating import Jinja2Templates
from starlette.status import HTTP_400_BAD_REQUEST
from starlette.responses import PlainTextResponse
from starlette.responses import JSONResponse
if request.method == 'POST':
content_type = request.headers.get("Content-Type", "")
if content_type == 'application/json':
data = await request.json()
else:
return PlainTextResponse('Bad Request!', status_code=HTTP_400_BAD_REQUEST)
try:
query = data["query"]
variables = data.get("variables")
except KeyError:
return PlainTextResponse('Bad Request!', status_code=HTTP_400_BAD_REQUEST)
result = graphene.Schema(query=Query).execute(query, variables=variables)
return JSONResponse({"data": result.data})
else:
return Jinja2Templates(directory='Toolkit').TemplateResponse('graphQL.html', {
"request": request,
"resultFilePath": resultFilePath
})
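# Hypothetical client sketch (not part of the original script): assumes the
# server below is already running and that the third-party `requests` package
# is installed; the introspection query is schema-agnostic, so no Query field
# names are assumed.
def _exampleClientQuery():
    import requests
    resp = requests.post(
        "http://0.0.0.0:8000/",
        json={"query": "{ __schema { queryType { name } } }"},
    )
    print(resp.json())  # {"data": {"__schema": {"queryType": {"name": "Query"}}}}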
if __name__ == '__main__':
if len(sys.argv) >= 2:
resultFilePath = sys.argv[1]
if os.path.isfile(resultFilePath):
serverStart()
else:
            print('{} file does not exist!'.format(resultFilePath))
else:
        print('Please provide the path of a segmentation result file, e.g.: python ArticutGraphQL.py articutResult.json')
# ==== inventree/InvenTree :: InvenTree/company/migrations/0039_auto_20210701_0509.py (MIT) ====
# Generated by Django 3.2.4 on 2021-07-01 05:09
import InvenTree.fields
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('common', '0004_inventreesetting'),
('company', '0038_manufacturerpartparameter'),
]
operations = [
migrations.AlterField(
model_name='supplierpricebreak',
name='price',
field=InvenTree.fields.InvenTreeModelMoneyField(currency_choices=[], decimal_places=4, default_currency='', help_text='Unit price at specified quantity', max_digits=19, null=True, verbose_name='Price'),
),
migrations.AlterField(
model_name='supplierpricebreak',
name='price_currency',
field=djmoney.models.fields.CurrencyField(choices=[], default='', editable=False, max_length=3),
),
]
# ==== ray-project/ray :: python/ray/_private/thirdparty/pathspec/compat.py (MIT / BSD-3-Clause / Apache-2.0 / MPL-2.0) ====
# encoding: utf-8
"""
This module provides compatibility between Python 2 and 3. Hardly
anything is used by this project to constitute including `six`_.
.. _`six`: http://pythonhosted.org/six
"""
import sys
if sys.version_info[0] < 3:
# Python 2.
unicode = unicode
string_types = (basestring,)
from collections import Iterable
from itertools import izip_longest
def iterkeys(mapping):
return mapping.iterkeys()
else:
# Python 3.
unicode = str
string_types = (unicode,)
from collections.abc import Iterable
from itertools import zip_longest as izip_longest
def iterkeys(mapping):
return mapping.keys()
try:
# Python 3.6+.
from collections.abc import Collection
except ImportError:
# Python 2.7 - 3.5.
from collections import Container as Collection
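# A small usage sketch (hypothetical, not part of the original module): the
# shims above let callers treat mappings identically on Python 2 and 3.
def _example_iterate(mapping):
    # `Collection`, `iterkeys` and `string_types` resolve per interpreter.
    assert isinstance(mapping, Collection)
    return sorted(key for key in iterkeys(mapping) if isinstance(key, string_types))
# e.g. _example_iterate({"b": 2, "a": 1}) -> ['a', 'b']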
# ==== openvinotoolkit/nncf :: examples/tensorflow/classification/main.py (Apache-2.0) ====
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import sys
from pathlib import Path
import tensorflow as tf
import tensorflow_addons as tfa
from examples.common.paths import configure_paths
from examples.common.sample_config import create_sample_config
from examples.tensorflow.classification.datasets.builder import DatasetBuilder
from examples.tensorflow.common.argparser import get_common_argument_parser
from examples.tensorflow.common.callbacks import get_callbacks
from examples.tensorflow.common.callbacks import get_progress_bar
from examples.tensorflow.common.distributed import get_distribution_strategy
from examples.tensorflow.common.experimental_patcher import patch_if_experimental_quantization
from examples.tensorflow.common.export import export_model
from examples.tensorflow.common.logger import logger
from examples.tensorflow.common.model_loader import get_model
from examples.tensorflow.common.optimizer import build_optimizer
from examples.tensorflow.common.scheduler import build_scheduler
from examples.tensorflow.common.utils import SummaryWriter
from examples.tensorflow.common.utils import create_code_snapshot
from examples.tensorflow.common.utils import get_run_name
from examples.tensorflow.common.utils import get_saving_parameters
from examples.tensorflow.common.utils import print_args
from examples.tensorflow.common.utils import serialize_cli_args
from examples.tensorflow.common.utils import serialize_config
from examples.tensorflow.common.utils import set_seed
from examples.tensorflow.common.utils import write_metrics
from nncf.config.utils import is_accuracy_aware_training
from nncf.tensorflow import create_compression_callbacks
from nncf.tensorflow.helpers.model_creation import create_compressed_model
from nncf.tensorflow.helpers.model_manager import TFModelManager
from nncf.tensorflow.initialization import register_default_init_args
from nncf.tensorflow.utils.state import TFCompressionState
from nncf.tensorflow.utils.state import TFCompressionStateLoader
def get_argument_parser():
parser = get_common_argument_parser(precision=False, save_checkpoint_freq=False, print_freq=False)
parser.add_argument(
"--dataset", help="Dataset to use.", choices=["imagenet2012", "cifar100", "cifar10"], default=None
)
parser.add_argument(
"--test-every-n-epochs", default=1, type=int, help="Enables running validation every given number of epochs"
)
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pretrained models from the tf.keras.applications",
action="store_true",
)
return parser
def get_config_from_argv(argv, parser):
args = parser.parse_args(args=argv)
config = create_sample_config(args, parser)
configure_paths(config, get_run_name(config))
return config
def get_dataset_builders(config, num_devices, one_hot=True):
image_size = config.input_info.sample_size[-2]
train_builder = DatasetBuilder(
config, image_size=image_size, num_devices=num_devices, one_hot=one_hot, is_train=True
)
val_builder = DatasetBuilder(
config, image_size=image_size, num_devices=num_devices, one_hot=one_hot, is_train=False
)
return train_builder, val_builder
def get_num_classes(dataset):
if "imagenet2012" in dataset:
num_classes = 1000
elif dataset == "cifar100":
num_classes = 100
elif dataset == "cifar10":
num_classes = 10
else:
num_classes = 1000
logger.info("The sample is started with {} classes".format(num_classes))
return num_classes
def load_checkpoint(checkpoint, ckpt_path):
logger.info("Load from checkpoint is enabled.")
if tf.io.gfile.isdir(ckpt_path):
path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)
logger.info("Latest checkpoint: {}".format(path_to_checkpoint))
else:
path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None
logger.info("Provided checkpoint: {}".format(path_to_checkpoint))
if not path_to_checkpoint:
logger.info("No checkpoint detected.")
if ckpt_path:
raise RuntimeError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}")
logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint))
status = checkpoint.restore(path_to_checkpoint)
status.expect_partial()
logger.info("Completed loading from checkpoint.")
def resume_from_checkpoint(checkpoint, ckpt_path, steps_per_epoch):
load_checkpoint(checkpoint, ckpt_path)
initial_step = checkpoint.model.optimizer.iterations.numpy()
initial_epoch = initial_step // steps_per_epoch
logger.info("Resuming from epoch %d", initial_epoch)
return initial_epoch
def load_compression_state(ckpt_path: str):
checkpoint = tf.train.Checkpoint(compression_state=TFCompressionStateLoader())
load_checkpoint(checkpoint, ckpt_path)
return checkpoint.compression_state.state
def get_model_accuracy(model_fn, model_params, nncf_config, validation_dataset, validation_steps):
with TFModelManager(model_fn, nncf_config, **model_params) as model:
model.compile(metrics=[tf.keras.metrics.CategoricalAccuracy(name="acc@1")])
results = model.evaluate(validation_dataset, steps=validation_steps, return_dict=True)
return 100 * results["acc@1"]
def run(config):
if config.disable_tensor_float_32_execution:
tf.config.experimental.enable_tensor_float_32_execution(False)
strategy = get_distribution_strategy(config)
if config.metrics_dump is not None:
write_metrics(0, config.metrics_dump)
set_seed(config)
model_fn, model_params = get_model(
config.model,
input_shape=config.get("input_info", {}).get("sample_size", None),
num_classes=config.get("num_classes", get_num_classes(config.dataset)),
pretrained=config.get("pretrained", False),
weights=config.get("weights", None),
)
train_builder, validation_builder = get_dataset_builders(config, strategy.num_replicas_in_sync)
train_dataset, validation_dataset = train_builder.build(), validation_builder.build()
nncf_config = register_default_init_args(
nncf_config=config.nncf_config, data_loader=train_dataset, batch_size=train_builder.global_batch_size
)
train_epochs = config.epochs
train_steps = train_builder.steps_per_epoch
validation_steps = validation_builder.steps_per_epoch
resume_training = config.ckpt_path is not None
compression_state = None
if resume_training:
compression_state = load_compression_state(config.ckpt_path)
if "train" in config.mode and is_accuracy_aware_training(config):
uncompressed_model_accuracy = get_model_accuracy(
model_fn, model_params, nncf_config, validation_dataset, validation_steps
)
with TFModelManager(model_fn, nncf_config, **model_params) as model:
with strategy.scope():
compression_ctrl, compress_model = create_compressed_model(model, nncf_config, compression_state)
compression_callbacks = create_compression_callbacks(compression_ctrl, log_dir=config.log_dir)
scheduler = build_scheduler(config=config, steps_per_epoch=train_steps)
optimizer = build_optimizer(config=config, scheduler=scheduler)
loss_obj = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
compress_model.add_loss(compression_ctrl.loss)
metrics = [
tf.keras.metrics.CategoricalAccuracy(name="acc@1"),
tf.keras.metrics.TopKCategoricalAccuracy(k=5, name="acc@5"),
tfa.metrics.MeanMetricWrapper(loss_obj, name="ce_loss"),
tfa.metrics.MeanMetricWrapper(compression_ctrl.loss, name="cr_loss"),
]
compress_model.compile(
optimizer=optimizer, loss=loss_obj, metrics=metrics, run_eagerly=config.get("eager_mode", False)
)
compress_model.summary()
checkpoint = tf.train.Checkpoint(
model=compress_model, compression_state=TFCompressionState(compression_ctrl)
)
initial_epoch = 0
if resume_training:
initial_epoch = resume_from_checkpoint(
checkpoint=checkpoint, ckpt_path=config.ckpt_path, steps_per_epoch=train_steps
)
callbacks = get_callbacks(
include_tensorboard=True,
track_lr=True,
profile_batch=0,
initial_step=initial_epoch * train_steps,
log_dir=config.log_dir,
ckpt_dir=config.checkpoint_save_dir,
checkpoint=checkpoint,
)
callbacks.append(get_progress_bar(stateful_metrics=["loss"] + [metric.name for metric in metrics]))
callbacks.extend(compression_callbacks)
validation_kwargs = {
"validation_data": validation_dataset,
"validation_steps": validation_steps,
"validation_freq": config.test_every_n_epochs,
}
if "train" in config.mode:
if is_accuracy_aware_training(config):
logger.info("starting an accuracy-aware training loop...")
result_dict_to_val_metric_fn = lambda results: 100 * results["acc@1"]
statistics = compress_model.accuracy_aware_fit(
train_dataset,
compression_ctrl,
uncompressed_model_accuracy=uncompressed_model_accuracy,
nncf_config=config.nncf_config,
callbacks=callbacks,
initial_epoch=initial_epoch,
steps_per_epoch=train_steps,
tensorboard_writer=SummaryWriter(config.log_dir, "accuracy_aware_training"),
log_dir=config.log_dir,
result_dict_to_val_metric_fn=result_dict_to_val_metric_fn,
**validation_kwargs,
)
logger.info(f"Compressed model statistics:\n{statistics.to_str()}")
else:
logger.info("training...")
compress_model.fit(
train_dataset,
epochs=train_epochs,
steps_per_epoch=train_steps,
initial_epoch=initial_epoch,
callbacks=callbacks,
**validation_kwargs,
)
logger.info("evaluation...")
statistics = compression_ctrl.statistics()
logger.info(statistics.to_str())
eval_model = compress_model
results = eval_model.evaluate(
validation_dataset,
steps=validation_steps,
callbacks=[get_progress_bar(stateful_metrics=["loss"] + [metric.name for metric in metrics])],
verbose=1,
)
if config.metrics_dump is not None:
write_metrics(results[1], config.metrics_dump)
if "export" in config.mode:
save_path, save_format = get_saving_parameters(config)
export_model(compression_ctrl.strip(), save_path, save_format)
logger.info("Saved to {}".format(save_path))
def export(config):
model, model_params = get_model(
config.model,
input_shape=config.get("input_info", {}).get("sample_size", None),
num_classes=config.get("num_classes", get_num_classes(config.dataset)),
pretrained=config.get("pretrained", False),
weights=config.get("weights", None),
)
model = model(**model_params)
compression_state = None
if config.ckpt_path:
compression_state = load_compression_state(config.ckpt_path)
compression_ctrl, compress_model = create_compressed_model(model, config.nncf_config, compression_state)
metrics = [
tf.keras.metrics.CategoricalAccuracy(name="acc@1"),
tf.keras.metrics.TopKCategoricalAccuracy(k=5, name="acc@5"),
]
loss_obj = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
compress_model.compile(loss=loss_obj, metrics=metrics)
compress_model.summary()
checkpoint = tf.train.Checkpoint(model=compress_model, compression_state=TFCompressionState(compression_ctrl))
if config.ckpt_path is not None:
load_checkpoint(checkpoint=checkpoint, ckpt_path=config.ckpt_path)
save_path, save_format = get_saving_parameters(config)
export_model(compression_ctrl.strip(), save_path, save_format)
logger.info("Saved to {}".format(save_path))
def main(argv):
parser = get_argument_parser()
config = get_config_from_argv(argv, parser)
print_args(config)
patch_if_experimental_quantization(config.nncf_config)
serialize_config(config.nncf_config, config.log_dir)
serialize_cli_args(parser, argv, config.log_dir)
nncf_root = Path(__file__).absolute().parents[3]
create_code_snapshot(nncf_root, osp.join(config.log_dir, "snapshot.tar.gz"))
if "train" in config.mode or "test" in config.mode:
run(config)
elif "export" in config.mode:
export(config)
if __name__ == "__main__":
main(sys.argv[1:])
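# A hypothetical invocation sketch. Only --dataset, --test-every-n-epochs and
# --pretrained are defined in this file; the remaining flags come from
# get_common_argument_parser(), which is not shown here, so treat their exact
# names as assumptions:
#
#     python main.py --config mobilenet_v2_int8.json --data /datasets/imagenet2012 \
#         --dataset imagenet2012 --pretrained --log-dir runs/mobilenet_v2_int8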
# ==== home-assistant/core :: tests/components/gogogate2/test_sensor.py (Apache-2.0) ====
"""Tests for the GogoGate2 component."""
from datetime import timedelta
from unittest.mock import MagicMock, patch
from ismartgate import GogoGate2Api, ISmartGateApi
from ismartgate.common import (
DoorMode,
DoorStatus,
GogoGate2ActivateResponse,
GogoGate2Door,
GogoGate2InfoResponse,
ISmartGateDoor,
ISmartGateInfoResponse,
Network,
Outputs,
Wifi,
)
from homeassistant.components.gogogate2.const import DEVICE_TYPE_ISMARTGATE, DOMAIN
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICE,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_USERNAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utcnow
from tests.common import MockConfigEntry, async_fire_time_changed
def _mocked_gogogate_sensor_response(battery_level: int, temperature: float):
return GogoGate2InfoResponse(
user="user1",
gogogatename="gogogatename0",
model="",
apiversion="",
remoteaccessenabled=False,
remoteaccess="abc123.blah.blah",
firmwareversion="",
apicode="",
door1=GogoGate2Door(
door_id=1,
permission=True,
name="Door1",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.OPENED,
sensor=True,
sensorid="ABCD",
camera=False,
events=2,
temperature=temperature,
voltage=battery_level,
),
door2=GogoGate2Door(
door_id=2,
permission=True,
name="Door2",
gate=True,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid="WIRE",
camera=False,
events=0,
temperature=temperature,
voltage=battery_level,
),
door3=GogoGate2Door(
door_id=3,
permission=True,
name="Door3",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid=None,
camera=False,
events=0,
temperature=temperature,
voltage=battery_level,
),
outputs=Outputs(output1=True, output2=False, output3=True),
network=Network(ip=""),
wifi=Wifi(SSID="", linkquality="", signal=""),
)
def _mocked_ismartgate_sensor_response(battery_level: int, temperature: float):
return ISmartGateInfoResponse(
user="user1",
ismartgatename="ismartgatename0",
model="ismartgatePRO",
apiversion="",
remoteaccessenabled=False,
remoteaccess="abc321.blah.blah",
firmwareversion="555",
pin=123,
lang="en",
newfirmware=False,
door1=ISmartGateDoor(
door_id=1,
permission=True,
name="Door1",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.CLOSED,
sensor=True,
sensorid="ABCD",
camera=False,
events=2,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
door2=ISmartGateDoor(
door_id=2,
permission=True,
name="Door2",
gate=True,
mode=DoorMode.GARAGE,
status=DoorStatus.CLOSED,
sensor=True,
sensorid="WIRE",
camera=False,
events=2,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
door3=ISmartGateDoor(
door_id=3,
permission=True,
name="Door3",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid=None,
camera=False,
events=0,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
network=Network(ip=""),
wifi=Wifi(SSID="", linkquality="", signal=""),
)
@patch("homeassistant.components.gogogate2.common.GogoGate2Api")
async def test_sensor_update(gogogate2api_mock, hass: HomeAssistant) -> None:
"""Test data update."""
bat_attributes = {
"device_class": "battery",
"door_id": 1,
"friendly_name": "Door1 battery",
"sensor_id": "ABCD",
"state_class": "measurement",
"unit_of_measurement": "%",
}
temp_attributes = {
"device_class": "temperature",
"door_id": 1,
"friendly_name": "Door1 temperature",
"sensor_id": "ABCD",
"unit_of_measurement": "°C",
"state_class": "measurement",
}
api = MagicMock(GogoGate2Api)
api.async_activate.return_value = GogoGate2ActivateResponse(result=True)
api.async_info.return_value = _mocked_gogogate_sensor_response(25, 7.0)
gogogate2api_mock.return_value = api
config_entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data={
CONF_IP_ADDRESS: "127.0.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "password",
},
)
config_entry.add_to_hass(hass)
assert hass.states.get("cover.door1") is None
assert hass.states.get("cover.door2") is None
assert hass.states.get("cover.door3") is None
assert hass.states.get("sensor.door1_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert hass.states.get("sensor.door1_temperature") is None
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("cover.door1")
assert hass.states.get("cover.door2")
assert hass.states.get("cover.door3")
assert hass.states.get("sensor.door1_battery").state == "25"
assert dict(hass.states.get("sensor.door1_battery").attributes) == bat_attributes
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door1_temperature").state == "7.0"
assert (
dict(hass.states.get("sensor.door1_temperature").attributes) == temp_attributes
)
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
api.async_info.return_value = _mocked_gogogate_sensor_response(40, 10.0)
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == "40"
assert hass.states.get("sensor.door1_temperature").state == "10.0"
api.async_info.return_value = _mocked_gogogate_sensor_response(None, None)
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == STATE_UNKNOWN
assert hass.states.get("sensor.door1_temperature").state == STATE_UNKNOWN
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert not hass.states.async_entity_ids(DOMAIN)
@patch("homeassistant.components.gogogate2.common.ISmartGateApi")
async def test_availability(ismartgateapi_mock, hass: HomeAssistant) -> None:
"""Test availability."""
bat_attributes = {
"device_class": "battery",
"door_id": 1,
"friendly_name": "Door1 battery",
"sensor_id": "ABCD",
"state_class": "measurement",
"unit_of_measurement": "%",
}
temp_attributes = {
"device_class": "temperature",
"door_id": 1,
"friendly_name": "Door1 temperature",
"sensor_id": "ABCD",
"unit_of_measurement": "°C",
"state_class": "measurement",
}
sensor_response = _mocked_ismartgate_sensor_response(35, -4.0)
api = MagicMock(ISmartGateApi)
api.async_info.return_value = sensor_response
ismartgateapi_mock.return_value = api
config_entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data={
CONF_DEVICE: DEVICE_TYPE_ISMARTGATE,
CONF_IP_ADDRESS: "127.0.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "password",
},
)
config_entry.add_to_hass(hass)
assert hass.states.get("cover.door1") is None
assert hass.states.get("cover.door2") is None
assert hass.states.get("cover.door3") is None
assert hass.states.get("sensor.door1_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("cover.door1")
assert hass.states.get("cover.door2")
assert hass.states.get("cover.door3")
assert hass.states.get("sensor.door1_battery").state == "35"
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert hass.states.get("sensor.door1_temperature").state == "-4.0"
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
assert (
hass.states.get("sensor.door1_battery").attributes[ATTR_DEVICE_CLASS]
== SensorDeviceClass.BATTERY
)
assert (
hass.states.get("sensor.door1_temperature").attributes[ATTR_DEVICE_CLASS]
== SensorDeviceClass.TEMPERATURE
)
assert (
hass.states.get("sensor.door1_temperature").attributes[ATTR_UNIT_OF_MEASUREMENT]
== "°C"
)
api.async_info.side_effect = Exception("Error")
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == STATE_UNAVAILABLE
assert hass.states.get("sensor.door1_temperature").state == STATE_UNAVAILABLE
api.async_info.side_effect = None
api.async_info.return_value = sensor_response
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == "35"
assert dict(hass.states.get("sensor.door1_battery").attributes) == bat_attributes
assert (
dict(hass.states.get("sensor.door1_temperature").attributes) == temp_attributes
)
# ==== switch-model/switch :: switch_model/balancing/operating_reserves/areas.py (Apache-2.0) ====
# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines balancing areas for operational reserves.
"""
import os
from pyomo.environ import *
from switch_model.utilities import unique_list
dependencies = "switch_model.timescales", "switch_model.balancing.load_zones"
def define_components(mod):
"""
Augments a Pyomo abstract model object with sets and parameters that
describe balancing areas. Unless otherwise stated, each set and
parameter is mandatory.
zone_balancing_area[z] describes which balancing area each load zone
belongs to. This defaults to "system_wide_balancing_area".
BALANCING_AREAS is the set of balancing areas in which operational
reserves must be met. These are the unique names specified in the
zone_balancing_area[z] parameter. This can be abbreviated as b for indexes.
ZONES_IN_BALANCING_AREA[b] is the set of load zones in a given balancing
area.
BALANCING_AREA_TIMEPOINTS is the cross product of BALANCING_AREAS and
TIMEPOINTS.
"""
mod.zone_balancing_area = Param(
mod.LOAD_ZONES, default="system_wide_balancing_area", within=Any
)
mod.BALANCING_AREAS = Set(
dimen=1,
initialize=lambda m: unique_list(
m.zone_balancing_area[z] for z in m.LOAD_ZONES
),
)
mod.ZONES_IN_BALANCING_AREA = Set(
mod.BALANCING_AREAS,
dimen=1,
initialize=lambda m, b: (
z for z in m.LOAD_ZONES if m.zone_balancing_area[z] == b
),
)
mod.BALANCING_AREA_TIMEPOINTS = Set(
dimen=2, initialize=mod.BALANCING_AREAS * mod.TIMEPOINTS
)
def load_inputs(mod, switch_data, inputs_dir):
"""
Import balancing_area data. The following files are expected in the input
directory:
load_zones.csv
LOAD_ZONE, ..., zone_balancing_area
"""
    # Include select in each load() function so that it checks the
    # column names, is indifferent to column order, and raises an error
    # if some columns are not found.
switch_data.load_aug(
filename=os.path.join(inputs_dir, "load_zones.csv"),
param=(mod.zone_balancing_area),
)
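# A sketch of the expected load_zones.csv (the docstring above shows the
# columns as "LOAD_ZONE, ..., zone_balancing_area"; the balancing-area column
# may be omitted since zone_balancing_area has a default):
#
#     LOAD_ZONE,zone_balancing_area
#     north,area_a
#     south,area_a
#     island,area_b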
# ==== sepandhaghighi/pyrgg :: pyrgg/graph_gen.py (MIT) ====
# -*- coding: utf-8 -*-
"""Pyrgg graph generators module."""
import random
import datetime
from pyrgg.params import *
from pyrgg.functions import *
# random_system=random.SystemRandom()
random_system = random
def dimacs_init(
file,
file_name,
min_weight,
max_weight,
vertices,
edge,
min_edge,
max_edge,
direct):
"""
Initialize dimacs output file.
:param file: output file object
:param file_name: file name
:type file_name: str
:type file: file_object
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: vertices number
:type vertices: int
:param edge: edge number
:type edge: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:return: None
"""
file.write(
DIMACS_FIX.format(
file_name,
str(vertices),
str(edge),
str(max_weight),
str(min_weight),
str(min_edge),
str(max_edge)))
def dimacs_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file and fill in.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".gr", "w") as buf:
dimacs_init(
buf,
file_name,
min_weight,
max_weight,
vertices,
edge_number,
min_edge,
max_edge,
direct,
)
_write_separated_file(
buf, edge_dic, weight_dic, separator=' ', prefix='a',
)
return edge_number
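def _example_dimacs_usage():
    # Hypothetical usage sketch (not part of the original module): all of the
    # *_maker functions in this module share this signature; this call writes
    # "sample_graph.gr" into the working directory.
    edge_number = dimacs_maker(
        "sample_graph",
        min_weight=1, max_weight=10,
        vertices=20, min_edge=1, max_edge=4,
        sign=False, direct=True, self_loop=False, multigraph=False,
    )
    print(edge_number)  # total number of generated edges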
def json_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in json format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".json", "w") as buf:
_write_properties_to_json(
buf,
min_weight,
max_weight,
sign,
direct,
self_loop,
multigraph)
_write_data_to_json(
buf,
edge_dic,
weight_dic,
)
return edge_number
def _write_data_to_json(buf, edge_dic, weight_dic):
"""Write data to json buffer.
:param buf: output file object
:type buf: file_object
:param edge_dic: dictionary containing edges data
:type edge_dic: dict
:param weight_dic: dictionary containing weights data
:type weight_dic: dict
:return: None
"""
buf.write('\n\t"graph": {\n')
_write_nodes_to_json(buf, edge_dic)
buf.write("\n\t\t],\n")
_write_edges_to_json(buf, edge_dic, weight_dic)
buf.write("\n\t\t]\n\t}\n}")
def _write_properties_to_json(
buf,
min_weight,
max_weight,
sign,
direct,
self_loop,
multigraph):
"""
Write properties to json buffer.
:param buf: output file object
:type buf: file_object
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: None
"""
buf.write('{\n\t"properties": {\n')
buf.write('\t\t"directed": ' + str(direct).lower() + ",\n")
buf.write('\t\t"signed": ' + str(sign).lower() + ",\n")
buf.write('\t\t"multigraph": ' + str(multigraph).lower() + ",\n")
buf.write('\t\t"weighted": ' +
str(is_weighted(max_weight, min_weight, sign)).lower() + ",\n")
buf.write('\t\t"self_loop": ' + str(self_loop).lower() + "\n\t},")
def _write_nodes_to_json(buf, edge_dic):
"""Write nodes to json.
:param buf: output file object
:type buf: file_object
:param edge_dic: dictionary containing edges data
:type edge_dic: dict
:return: None
"""
first_line = True
nodes = '\t\t"nodes":[\n'
buf.write(nodes)
for key in edge_dic:
nodes = ""
if first_line:
first_line = False
else:
nodes += ",\n"
nodes = "".join([
nodes,
'\t\t{\n\t\t\t',
'"id": ',
str(key),
'\n\t\t}'
])
buf.write(nodes)
def _write_edges_to_json(buf, edge_dic, weight_dic):
"""Write edges to json.
:param buf: output file object
:type buf: file_object
:param edge_dic: dictionary containing edges data
:type edge_dic: dict
:param weight_dic: dictionary containing weights data
:type weight_dic: dict
:return: None
"""
edges = '\t\t"edges":[\n'
first_line = True
buf.write(edges)
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
edges = ""
if first_line:
first_line = False
else:
edges += ",\n"
edges = "".join([
edges,
'\t\t{\n\t\t\t"source": ',
str(key),
',\n\t\t\t',
'"target": ',
str(value),
',\n\t\t\t',
'"weight": ',
str(weight_dic[key][j]),
'\n\t\t}'
])
buf.write(edges)
def csv_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in csv format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".csv", "w") as buf:
_write_separated_file(buf, edge_dic, weight_dic, separator=',')
return edge_number
def tsv_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in tsv format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".tsv", "w") as buf:
_write_separated_file(buf, edge_dic, weight_dic, separator='\t')
return edge_number
def _write_separated_file(buf, edge_dic, weight_dic, separator, prefix=''):
r"""Write data to buffer separated with ``separator``.
:param buf: output file object
:type buf: file_object
:param edge_dic: dictionary containing edges data
:type edge_dic: dict
:param weight_dic: dictionary containing weights data
:type weight_dic: dict
:param separator: separator in a separated file, like ',', '\t', ' ', etc.
:type separator: str
:param prefix: prefix to be added in front of each line
:type prefix: str
:return: None
"""
dummy_prefix = object()
prefix = prefix or dummy_prefix
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
elements = [
prefix,
str(key),
str(value),
str(weight_dic[key][j]) + "\n"
]
string = separator.join(x for x in elements if x != dummy_prefix)
buf.write(string)
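# For example, with separator=' ' and prefix='a' (the DIMACS case above), each
# emitted line looks like "a 1 3 10"; with prefix='' the dummy prefix field is
# filtered out entirely, e.g. "1,3,10" for the CSV case.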
def wel_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in wel format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".wel", "w") as buf:
_write_separated_file(buf, edge_dic, weight_dic, separator=' ')
return edge_number
def mtx_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in Matrix Market format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
max_edge_length = len(str(vertices))
with open(file_name + ".mtx", "w") as buf:
buf.write("%%MatrixMarket matrix coordinate real general\n")
buf.write(
"{0} {0} {1}\n".format(str(vertices), str(edge_number))
)
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
shift1 = (max_edge_length - len(str(key))) + 4
shift2 = (max_edge_length - len(str(value))) + 4
buf.write(str(key) + shift1 * " " + str(value) + shift2 * " " +
str(weight_dic[key][j]) + "\n")
return edge_number
def lp_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in ASP format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".lp", "w") as buf:
for key in edge_dic:
buf.write('node(' + str(key) + ").\n")
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
buf.write('edge(' + str(key) + "," + str(value) +
"," + str(weight_dic[key][j]) + ").\n")
return edge_number
def tgf_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in Trivial Graph Format (TGF).
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".tgf", "w") as buf:
for key in edge_dic:
buf.write(str(key) + "\n")
buf.write("#\n")
_write_separated_file(buf, edge_dic, weight_dic, separator=' ')
return edge_number
def gl_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
    Create output file in Graph Line (GL) format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".gl", "w") as buf:
for key, edge_val in edge_dic.items():
line_data = str(key)
write_flag = False
for j, value in enumerate(edge_val):
write_flag = True
line_data += " " + str(value) + ":" + str(weight_dic[key][j])
if write_flag:
buf.write(line_data + "\n")
return edge_number
def dl_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in UCINET DL Format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".dl", "w") as buf:
buf.write("dl\nformat=edgelist1\nn=" + str(vertices) + "\ndata:\n")
_write_separated_file(buf, edge_dic, weight_dic, separator=' ')
return edge_number
def gdf_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in GDF Format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
with open(file_name + ".gdf", "w") as buf:
buf.write("nodedef>name VARCHAR,label VARCHAR\n")
for key in edge_dic:
buf.write(str(key) + "," + "Node{0}".format(str(key)) + "\n")
buf.write("edgedef>node1 VARCHAR,node2 VARCHAR,weight DOUBLE\n")
_write_separated_file(buf, edge_dic, weight_dic, separator=',')
return edge_number
def gml_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in GML Format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph,
)
header = 'graph\n[\n multigraph {0}\n directed {1}\n'.format(
int(multigraph), int(direct))
with open(file_name + ".gml", "w") as buf:
buf.write(header)
for key in edge_dic:
buf.write(
" node\n [\n id " +
str(key) +
"\n" +
' label "Node {0}"\n'.format(
str(key)) +
" ]\n")
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
buf.write(" edge\n [\n source " +
str(key) +
"\n" +
" target " +
str(value) +
"\n" +
" value " +
str(weight_dic[key][j]) +
"\n" +
" ]\n")
buf.write("]")
return edge_number
def gexf_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in GEXF Format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph)
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
header += '<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">\n'
date = datetime.datetime.now().date()
meta = " " * 4 + '<meta lastmodifieddate="{0}">\n'.format(date)
meta += " " * 8 + '<creator>PyRGG</creator>\n'
meta += " " * 8 + '<description>{0}</description>\n'.format(file_name)
meta += " " * 4 + '</meta>\n'
if direct:
defaultedgetype = "directed"
else:
defaultedgetype = "undirected"
with open(file_name + ".gexf", "w") as buf:
buf.write(header)
buf.write(meta)
buf.write(
" " * 4 + '<graph defaultedgetype="' + defaultedgetype + '">\n'
)
buf.write(" " * 8 + "<nodes>\n")
for key in edge_dic:
buf.write(
" " * 12 +
'<node id="' +
str(key) + '"' +
' label="Node {0}" />'.format(
str(key)) + "\n")
buf.write(" " * 8 + "</nodes>\n")
buf.write(" " * 8 + "<edges>\n")
edge_id = 1
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
buf.write(
" " * 12 +
'<edge id="' +
str(edge_id) + '"' +
' source="' +
str(key) + '"'
' target="' +
str(value) + '"' +
' weight="{0}" />'.format(
str(weight_dic[key][j])) + "\n")
edge_id += 1
buf.write(" " * 8 + "</edges>\n")
buf.write(" " * 4 + "</graph>\n")
buf.write("</gexf>")
return edge_number
def dot_maker(
file_name,
min_weight,
max_weight,
vertices,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph):
"""
Create output file in Dot Format.
:param file_name: file name
:type file_name: str
:param min_weight: weight min range
:type min_weight: int
:param max_weight: weight max range
:type max_weight: int
:param vertices: number of vertices
:type vertices: int
:param min_edge: minimum number of edges (connected to each vertex)
:type min_edge: int
:param max_edge: maximum number of edges (connected to each vertex)
:type max_edge: int
:param sign: weight sign flag
:type sign: bool
:param direct: directed and undirected graph flag
:type direct: bool
:param self_loop: self loop flag
:type self_loop: bool
:param multigraph: multigraph flag
:type multigraph: bool
:return: edge_number as int
"""
edge_dic, weight_dic, edge_number = edge_gen(
vertices,
min_weight,
max_weight,
min_edge,
max_edge,
sign,
direct,
self_loop,
multigraph)
header = "{0} {1}"
linker = "--"
if direct:
header = header.format("digraph", file_name)
linker = "->"
else:
header = header.format("graph", file_name)
with open(file_name + ".gv", "w") as buf:
buf.write(header + " {")
for key, edge_val in edge_dic.items():
for j, value in enumerate(edge_val):
buf.write(
"\n" +
str(key) +
" " +
linker +
" " +
str(value) +
" [weight={}]".format(
weight_dic[key][j]) +
";")
buf.write("\n}")
return edge_number
# ==== chrisjsewell/ipypublish :: ipypublish/sphinx/notebook/__init__.py (BSD-3-Clause) ====
# expose setup function for sphinx
from ipypublish.sphinx.notebook.extension import setup # noqa: F401
# ==== Chia-Network/chia-blockchain :: tests/check_pytest_monitor_output.py (Apache-2.0) ====
#!/usr/bin/env python3
from __future__ import annotations
import sys
ret = 0
# example input line
# test_non_tx_aggregate_limits 0.997759588095738 1.45325589179993 554.45703125
for ln in sys.stdin:
line = ln.strip().split()
print(f"{float(line[1]) * 100.0: 8.1f}% CPU {float(line[2]):7.1f}s {float(line[3]): 8.2f} MB RAM {line[0]}")
limit = 800
# until this can be optimized, use higher limits
if "test_duplicate_coin_announces" in line[0]:
limit = 2200
elif (
"test_duplicate_large_integer_substr" in line[0]
or "test_duplicate_reserve_fee" in line[0]
or "test_duplicate_large_integer_negative" in line[0]
or "test_duplicate_large_integer" in line[0]
):
limit = 1100
if float(line[3]) > limit:
print(" ERROR: ^^ exceeded RAM limit ^^ \n")
ret += 1
if ret > 0:
print("some tests used too much RAM")
sys.exit(ret)
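# A usage sketch (hypothetical input; one whitespace-separated record per line,
# matching the example format in the comment near the top of this script):
#
#     printf 'test_foo 0.95 1.4 554.4\n' | python tests/check_pytest_monitor_output.py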
# ==== davemlz/eemont :: eemont/dataframe.py (MIT) ====
import warnings
import ee
import pandas as pd
def _extend_pdDataFrame():
"""Decorator. Extends the pd.DataFrame class."""
return lambda f: (setattr(pd.core.frame.DataFrame, f.__name__, f) or f)
@_extend_pdDataFrame()
def toEEFeatureCollection(self, latitude=None, longitude=None):
"""Converts a pd.DataFrame object into an ee.FeatureCollection object.
If lat/lon coordinates are available, the Data Frame can be converted into
a Feature Collection with an associated geometry.
Tip
----------
Check more info about data conversion in the :ref:`User Guide<Data Conversion>`.
Parameters
----------
self : pd.DataFrame [this]
Data Frame to convert into a Feature Collection.
latitude : string
Name of a latitude column, if available. Coupled with a longitude column,
an ee.Geometry.Point is created and associated to each Feature.
longitude : string
Name of a longitude column, if available. Coupled with a latitude column,
an ee.Geometry.Point is created and associated to each Feature.
Returns
-------
ee.FeatureCollection
Data Frame converted into a Feature Collection.
Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> df = pd.DataFrame()
>>> df['lat'] = [2.92846, 4.8927]
>>> df['lon'] = [-76.0269, -75.3188]
>>> df['name'] = ['Nevado del Huila', 'Nevado del Ruiz']
>>> fc = df.toEEFeatureCollection(latitude = 'lat',longitude = 'lon')
"""
def getFeature(r):
properties = r.to_dict()
        if latitude is not None and longitude is None:
            warnings.warn(
                "longitude missing, Feature Collection with no geometries generated!",
                Warning,
            )
            return ee.Feature(None, properties)
        elif latitude is None and longitude is not None:
            warnings.warn(
                "latitude missing, Feature Collection with no geometries generated!",
                Warning,
            )
            return ee.Feature(None, properties)
        elif latitude is not None and longitude is not None:
            point = ee.Geometry.Point([r[longitude], r[latitude]])
            return ee.Feature(point, properties)
        else:
            return ee.Feature(None, properties)
dataFrame = self.copy()
dataFrame["feature"] = dataFrame.apply(getFeature, axis=1)
featureCollection = ee.FeatureCollection(dataFrame["feature"].tolist())
return featureCollection
# ==== h2oai/h2o-3 :: h2o-docs/src/booklets/v2_2015/source/GLM_Vignette_code_examples/glm_recalculate_predict.py (Apache-2.0) ====
# manually set the prediction threshold to 0.3
import pandas as pd
pred = binomial_fit.predict(h2o_df)
pred['predict'] = pred['p1']>0.3
# ==== DataDog/dd-trace-py :: tests/contrib/django_celery/test_django_celery.py (Apache-2.0 / BSD-3-Clause) ====
from os.path import dirname
from os.path import sep
import subprocess
from tests.utils import call_program
def test_django_celery_gevent_startup():
"""Test that Celery starts correctly with the Django integration enabled.
If the Django integration force-loads some modules while patching, it is
likely that we might see lazy objects, like settings, being created before
time. This would cause Celery to trigger exceptions, causing the application
to fail to start.
In this particular instance we test that the application starts correctly
(albeit with no message broker running) and that we don't get any errors
about Django settings.
"""
try:
out, err, retcode, _ = call_program(
"ddtrace-run",
"celery",
"-A",
"proj",
"worker",
"--pool=gevent",
cwd=sep.join((dirname(__file__), "app")),
timeout=3,
)
except subprocess.TimeoutExpired as celery:
out = celery.stdout.decode("utf-8")
err = celery.stderr.decode("utf-8")
assert "celery@" in out, "Celery started correctly"
assert "DJANGO_SETTINGS_MODULE" not in err, "No Django lazy objects"
else:
assert retcode == 0, "Celery was finished with errors: %s" % err.decode("utf-8")
|
2647ca3e7bb93c2862ca7e15e8236a0e2ccca942
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/edgelm/examples/speech_text_joint_to_text/criterions/__init__.py
|
93924b75550a6b9a7c7de7f4defba9b8f8465926
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 502
|
py
|
__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_text_joint_to_text.criterions." + criterion_name
)
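# The loop above auto-imports every sibling module in this package, so any
# criterion those modules register (e.g. via fairseq's @register_criterion
# decorator) becomes available without maintaining an explicit import list.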
|
ddaf922da8a981b54d066f7c9adafb0e18be2b16
|
58a0baa4abf592b4618a5e2adc1a84777a39504f
|
/anytree/search.py
|
0a9c075fc4367b2d87fcbcec55ab18726bec83fa
|
[
"Apache-2.0"
] |
permissive
|
c0fec0de/anytree
|
baa64ad650beb310c9a6321242ca557cf7c1e92e
|
27ff97eed4c09b4f0eb9ae61b45dd30b794a135c
|
refs/heads/main
| 2023-09-01T00:26:02.398101
| 2022-09-20T20:58:59
| 2022-09-20T20:58:59
| 66,707,325
| 861
| 133
|
Apache-2.0
| 2023-06-25T20:17:17
| 2016-08-27T09:42:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,330
|
py
|
search.py
|
"""
Node Searching.
.. note:: You can speed up node searching by installing https://pypi.org/project/fastcache/ and
          using :any:`cachedsearch`.
"""
from anytree.iterators import PreOrderIter
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
        filter_: function called with every `node` as argument; the node is included if it returns `True`.
        stop: stop iteration at `node` if `stop` function returns `True` for `node`.
        maxlevel (int): maximum depth to descend in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
"""
return _findall(node, filter_=filter_, stop=stop, maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
        value: value which needs to match.
    Keyword Args:
        name (str): attribute name that needs to match.
        maxlevel (int): maximum depth to descend in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall_by_attr(f, "d")
(Node('/f/b/d'),)
"""
return _findall(
node,
filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel,
mincount=mincount,
maxcount=maxcount,
)
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
        filter_: function called with every `node` as argument; the node is included if it returns `True`.
        stop: stop iteration at `node` if `stop` function returns `True` for `node`.
        maxlevel (int): maximum depth to descend in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
"""
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
def find_by_attr(node, value, name="name", maxlevel=None):
"""
Search for *single* node with attribute `name` having `value` but stop at `maxlevel`.
    Return the single matching node, or `None` if there is no match.
Args:
node: top node, start searching.
        value: value which needs to match.
    Keyword Args:
        name (str): attribute name that needs to match.
        maxlevel (int): maximum depth to descend in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d, foo=4)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
"""
return _find(node, filter_=lambda n: _filter_by_name(n, name, value), maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
result = tuple(PreOrderIter(node, filter_, stop, maxlevel))
resultlen = len(result)
if mincount is not None and resultlen < mincount:
msg = "Expecting at least %d elements, but found %d."
raise CountError(msg % (mincount, resultlen), result)
if maxcount is not None and resultlen > maxcount:
msg = "Expecting %d elements at maximum, but found %d."
raise CountError(msg % (maxcount, resultlen), result)
return result
def _filter_by_name(node, name, value):
try:
return getattr(node, name) == value
except AttributeError:
return False
class CountError(RuntimeError):
def __init__(self, msg, result):
"""Error raised on `mincount` or `maxcount` mismatch."""
if result:
msg += " " + repr(result)
super(CountError, self).__init__(msg)
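# A minimal sketch of guarding a search with count bounds (assuming the
# example tree `f` from the doctests above):
#
# try:
#     nodes = findall(f, filter_=lambda n: n.name == "x", mincount=1)
# except CountError:
#     nodes = ()  # fall back to an empty result when nothing matches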
|
47a8057c3a7ff282254a541d8e9bcccf6ae7bc7b
|
cde096ba977b63becc1b9066677331ef4594a797
|
/csfieldguide/interactives/migrations/0017_merge_0015_auto_20210805_0142_0016_auto_20191129_0012.py
|
65e80d5a9da172b4cd6edbfa5e4f43eff36dd4eb
|
[
"CC-BY-NC-SA-4.0",
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"WTFPL",
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-2.5",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-field-guide
|
655524b161fab0ab422679dd80720f660f2cfa98
|
ea3281ec6f4d17538f6d3cf6f88d74fa54581b34
|
refs/heads/develop
| 2023-08-28T14:33:58.789843
| 2023-08-28T08:24:03
| 2023-08-28T08:24:03
| 34,356,619
| 364
| 97
|
MIT
| 2023-09-14T08:58:55
| 2015-04-21T23:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 283
|
py
|
0017_merge_0015_auto_20210805_0142_0016_auto_20191129_0012.py
|
# Generated by Django 3.2.6 on 2021-08-18 22:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('interactives', '0015_auto_20210805_0142'),
('interactives', '0016_auto_20191129_0012'),
]
operations = [
]
|
98dcfd33553697d420b74abf0f863b9be6b23e0f
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/browser/browser_controls/android/DEPS
|
5437f8905b6a2f4bd67068d8962ba50b7be6c5c6
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 193
|
DEPS
|
noparent = True
include_rules = [
"+base/android",
"+base/test/android",
"+chrome/browser/flags",
"+content/public/android",
"+components/browser_ui/util/android",
"+ui/android",
]
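# noparent stops rule inheritance from parent DEPS files, so the
# include_rules above are the complete allow-list for this directory.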
|
|
14b9afb08a15873983148f58d56239ad160c8c76
|
1180c0bfe29959d95f3c131e6e839950e528d4ee
|
/13/realnitinworks/directors.py
|
332ed1d6a1a2164f4950ab31c2f2e693a073bc12
|
[] |
no_license
|
pybites/challenges
|
e3e461accd8e7f890aee8007ba5070086ef983fc
|
02b77652d0901e6e06cb9b1e7cb3e59c675445c2
|
refs/heads/community
| 2023-08-20T18:19:02.982214
| 2022-11-17T09:23:31
| 2022-11-17T09:23:31
| 78,264,928
| 764
| 3,115
| null | 2023-07-21T05:58:19
| 2017-01-07T07:17:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,649
|
py
|
directors.py
|
from collections import namedtuple, Counter, defaultdict
import csv
import statistics
MOVIES_DATASET = "movie_metadata.csv"
FROM_YEAR = "1960"
MIN_MOVIES = 4
TOP_N = 20
Movie = namedtuple('Movie', 'director title year rating')
def convert_csv_to_dict(data=MOVIES_DATASET):
""" Yield Movie(director, title, year, rating) record from input csv file """
with open(data) as csvfile:
for movie in csv.DictReader(csvfile):
yield Movie(
director=movie['director_name'],
title=movie['movie_title'].strip(),
year=movie['title_year'],
rating=movie['imdb_score']
)
def from_year(movies, year=FROM_YEAR):
    """ Filter out movies released before `year` (records with no year are skipped) """
    for movie in movies:
        # year fields are strings; 4-digit years compare correctly lexicographically
        if movie.year and movie.year >= year:
            yield movie
def director_filter(movies, min_movies=MIN_MOVIES):
    """ Keep only movies whose director has made at least `min_movies` films """
    movies = list(movies)  # materialize: the input is iterated twice below
    director_count = Counter(movie.director for movie in movies)
    return (
        movie
        for movie in movies
        if director_count[movie.director] >= min_movies
    )
def map_director_movie(movies):
""" Make a mapping of directors and their movies"""
def by_rating(movie):
return movie.rating
directors = defaultdict(lambda: defaultdict(list))
for movie in sorted(movies, key=by_rating, reverse=True):
directors[movie.director]["movies"].append(movie)
return directors
def calculate_mean_imdb_score(director_movie_map):
""" Add mean_rating key into the input map"""
for director, value in director_movie_map.items():
imdb_scores = []
for movie in value["movies"]:
imdb_scores.append(float(movie.rating))
director_movie_map[director]["mean_rating"] = round(statistics.mean(imdb_scores), 1)
return director_movie_map
def top_n_directors(director_movie_map, n=TOP_N):
""" Yield top n directors and their movie details """
def mean_rating(item):
_, details = item
return details.get("mean_rating")
sorted_directors = sorted(director_movie_map.items(), key=mean_rating, reverse=True)
for idx, (director, details) in enumerate(sorted_directors, start=1):
yield director, details
if idx >= n:
break
if __name__ == "__main__":
    # Parse the csv into a stream of Movie namedtuples
movies = convert_csv_to_dict()
# Filter out movies that are made from 1960 onwards
movies = from_year(movies)
# Filter movies of those directors who have made 4 films or more
movies = director_filter(movies)
# Map director to their movies
director_movie_map = map_director_movie(movies)
# Calculate the mean imdb score of movies of each director
directors = calculate_mean_imdb_score(director_movie_map)
# This is calculated for indented output display
max_movie_name_len = max(
len(movie.title)
for _, details in top_n_directors(directors, n=20)
for movie in details["movies"]
)
# Display the results
for idx, (director, details) in enumerate(top_n_directors(directors, n=20), start=1):
mean_rating = details["mean_rating"]
        idx = f"{idx:02d}"  # zero-pad single-digit ranks
print(f'{idx}. {director:<{max_movie_name_len+1}} {mean_rating}')
print("-" * (max_movie_name_len + 10))
for movie in details["movies"]:
print(f"{movie.year}]{movie.title:<{max_movie_name_len}} {movie.rating}")
print("")
|
233307d388408095a072784aecccba6eacb2128f
|
97829061d8ed7965caad494278380fa8e8c35cb4
|
/experiments/utils.py
|
a55b2b20a195ee789792f6b33ee844ec7d6eb184
|
[
"MIT"
] |
permissive
|
ContinualAI/continual-learning-baselines
|
70b699fc1b910c46495945aa230fa18e39d3a217
|
118f10c0f049b451997bade5971d4ac9c749683c
|
refs/heads/main
| 2023-08-09T21:30:15.215420
| 2023-07-31T12:14:32
| 2023-07-31T12:14:32
| 359,855,911
| 116
| 15
|
MIT
| 2023-07-31T12:14:34
| 2021-04-20T15:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
utils.py
|
import random
from types import SimpleNamespace
from typing import Dict, Union
import numpy as np
import torch
from avalanche.benchmarks import dataset_benchmark
from avalanche.benchmarks.utils import AvalancheSubset
def set_seed(seed):
if seed is None:
return
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
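        # Note: deterministic=True and benchmark=False trade speed for
        # reproducibility; some CUDA ops may still be nondeterministic.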
def create_default_args(args_dict, additional_args=None):
args = SimpleNamespace()
for k, v in args_dict.items():
args.__dict__[k] = v
if additional_args is not None:
for k, v in additional_args.items():
args.__dict__[k] = v
return args
def restrict_dataset_size(scenario, size: int):
"""
    Util used to restrict the size of the training datasets coming from a scenario.

    :param size: number of training samples to keep per experience
"""
modified_train_ds = []
modified_test_ds = []
modified_valid_ds = []
for i, train_ds in enumerate(scenario.train_stream):
train_ds_idx, _ = torch.utils.data.random_split(
torch.arange(len(train_ds.dataset)),
(size, len(train_ds.dataset) - size),
)
dataset = AvalancheSubset(train_ds.dataset, train_ds_idx)
modified_train_ds.append(dataset)
modified_test_ds.append(scenario.test_stream[i].dataset)
if hasattr(scenario, "valid_stream"):
modified_valid_ds.append(scenario.valid_stream[i].dataset)
scenario = dataset_benchmark(
modified_train_ds,
modified_test_ds,
other_streams_datasets={"valid": modified_valid_ds}
if len(modified_valid_ds) > 0
else None,
)
return scenario
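# A minimal usage sketch (hypothetical; assumes an Avalanche classic
# benchmark such as SplitMNIST is available):
#
# from avalanche.benchmarks.classic import SplitMNIST
# scenario = SplitMNIST(n_experiences=5)
# scenario = restrict_dataset_size(scenario, size=500)  # 500 train samples each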
|
2ffdc684b3defe4ce7e108852adae20ce4bd336d
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/orttraining/orttraining/test/python/orttraining_test_python_bindings.py
|
56338ddbaffefef5dd8e5f48bb2b8a665e53c95b
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 20,821
|
py
|
orttraining_test_python_bindings.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import annotations
import os
import pathlib
import tempfile
import numpy as np
import onnx
import pytest
import torch
from orttraining_test_onnxblock import _get_models
import onnxruntime.training.onnxblock as onnxblock
from onnxruntime import OrtValue, SessionOptions
from onnxruntime.training import artifacts
from onnxruntime.training.api import CheckpointState, LinearLRScheduler, Module, Optimizer
class SimpleModelWithCrossEntropyLoss(onnxblock.TrainingBlock):
def __init__(self):
super().__init__()
self.loss = onnxblock.loss.CrossEntropyLoss()
def build(self, output_name):
return self.loss(output_name)
def _create_training_artifacts(
artifact_directory: str | os.PathLike,
requires_grad: list[str] | None = None,
frozen_params: list[str] | None = None,
optimizer_type=artifacts.OptimType.AdamW,
):
device = "cpu"
batch_size, input_size, hidden_size, output_size = 64, 784, 500, 10
pt_model, onnx_model = _get_models(device, batch_size, input_size, hidden_size, output_size)
if requires_grad is None:
requires_grad = [name for name, param in pt_model.named_parameters() if param.requires_grad]
if frozen_params is None:
frozen_params = [name for name, param in pt_model.named_parameters() if not param.requires_grad]
artifacts.generate_artifacts(
onnx_model,
optimizer=optimizer_type,
loss=artifacts.LossType.CrossEntropyLoss,
requires_grad=requires_grad,
frozen_params=frozen_params,
artifact_directory=artifact_directory,
)
training_model_file = os.path.join(artifact_directory, "training_model.onnx")
eval_model_file = os.path.join(artifact_directory, "eval_model.onnx")
optimizer_model_file = os.path.join(artifact_directory, "optimizer_model.onnx")
checkpoint_file = os.path.join(artifact_directory, "checkpoint")
return checkpoint_file, training_model_file, eval_model_file, optimizer_model_file, pt_model
def test_train_step():
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
_,
pt_model,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state)
model.train()
ort_loss = model(inputs, labels)
# Calculate loss using pytorch model to compare it with Module's output.
pt_outputs = pt_model(torch.from_numpy(inputs))
loss_fn = torch.nn.CrossEntropyLoss()
pt_loss = loss_fn(pt_outputs, torch.from_numpy(labels).long())
assert np.allclose(ort_loss, pt_loss.detach().numpy())
def test_eval_step():
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
eval_model_file_path,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, eval_model_file_path)
model.train()
model(inputs, labels)
model.eval()
fetches = model(inputs, labels)
assert fetches
@pytest.mark.parametrize("optimizer_type", [artifacts.OptimType.SGD, artifacts.OptimType.AdamW])
def test_optimizer_step(optimizer_type):
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
optimizer_model_file_path,
_,
) = _create_training_artifacts(temp_dir, optimizer_type=optimizer_type)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module and Optimizer.
model = Module(training_model_file_path, state)
optimizer = Optimizer(optimizer_model_file_path, model)
model.train()
old_flatten_params = model.get_contiguous_parameters()
model(inputs, labels)
optimizer.step()
new_params = model.get_contiguous_parameters()
# Assert that the parameters are updated.
assert not np.array_equal(old_flatten_params.numpy(), new_params.numpy())
@pytest.mark.parametrize("optimizer_type", [artifacts.OptimType.SGD, artifacts.OptimType.AdamW])
def test_get_and_set_lr(optimizer_type):
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
optimizer_model_file_path,
_,
) = _create_training_artifacts(temp_dir, optimizer_type=optimizer_type)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module and Optimizer.
model = Module(training_model_file_path, state)
optimizer = Optimizer(optimizer_model_file_path, model)
# Test get and set learning rate.
lr = optimizer.get_learning_rate()
assert round(lr, 3) == 0.001
optimizer.set_learning_rate(0.5)
new_lr = optimizer.get_learning_rate()
assert np.isclose(new_lr, 0.5)
assert lr != new_lr
@pytest.mark.parametrize("optimizer_type", [artifacts.OptimType.SGD, artifacts.OptimType.AdamW])
def test_scheduler_step(optimizer_type):
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
optimizer_model_file_path,
_,
) = _create_training_artifacts(temp_dir, optimizer_type=optimizer_type)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module and Optimizer.
model = Module(training_model_file_path, state)
optimizer = Optimizer(optimizer_model_file_path, model)
scheduler = LinearLRScheduler(optimizer, 1, 2, 0.2)
# Test get and set learning rate.
lr = optimizer.get_learning_rate()
assert np.allclose(lr, 0.0)
model.train()
model(inputs, labels)
optimizer.step()
scheduler.step()
# Get new learning rate.
new_lr = optimizer.get_learning_rate()
assert new_lr != lr
def test_training_module_checkpoint():
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Training Module and Training Optimizer.
model = Module(training_model_file_path, state)
model.train()
model(inputs, labels)
checkpoint_save_path = os.path.join(temp_dir, "checkpoint_export.ckpt")
CheckpointState.save_checkpoint(state, checkpoint_save_path)
old_flatten_params = model.get_contiguous_parameters()
# Assert the checkpoint was saved.
assert os.path.exists(checkpoint_save_path)
# Assert the checkpoint parameters remain after saving.
new_state = CheckpointState.load_checkpoint(checkpoint_save_path)
new_model = Module(training_model_file_path, new_state)
new_params = new_model.get_contiguous_parameters()
assert np.array_equal(old_flatten_params.numpy(), new_params.numpy())
@pytest.mark.parametrize("optimizer_type", [artifacts.OptimType.SGD, artifacts.OptimType.AdamW])
@pytest.mark.parametrize("trainable_only", [True, False])
def test_copy_buffer_to_parameters(trainable_only, optimizer_type):
# Generating random data for testing.
inputs = torch.randn(64, 784).numpy()
labels = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
optimizer_model_file_path,
_,
) = _create_training_artifacts(
temp_dir,
requires_grad=["fc2.weight", "fc2.bias"],
frozen_params=["fc1.weight", "fc1.bias"],
optimizer_type=optimizer_type,
)
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module and Optimizer.
model = Module(training_model_file_path, state)
optimizer = Optimizer(optimizer_model_file_path, model)
# Keep a copy of the parameters.
old_output_params = model.get_contiguous_parameters(trainable_only=trainable_only)
# Run a Training Step.
model.train()
model(inputs, labels)
optimizer.step()
# Get the new parameters.
output_params = model.get_contiguous_parameters(trainable_only=trainable_only)
# Make sure old params are different from new params.
assert not np.array_equal(old_output_params.numpy(), output_params.numpy())
# Copy the old parameters to the model.
model.copy_buffer_to_parameters(old_output_params, trainable_only=trainable_only)
# Get the saved parameters.
saved_params = model.get_contiguous_parameters(trainable_only=trainable_only)
# Make sure the saved parameters are the same as the old parameters.
assert np.array_equal(old_output_params.numpy(), saved_params.numpy())
def test_export_model_for_inferencing():
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
eval_model_file_path,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, eval_model_file_path)
# Export inference model
inference_model_file_path = os.path.join(temp_dir, "inference_model.onnx")
model.export_model_for_inferencing(inference_model_file_path, ["output-0"])
assert os.path.exists(inference_model_file_path)
def test_cuda_execution_provider():
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, device="cuda")
params = model.get_contiguous_parameters()
# Check if parameters are moved to cuda.
assert params.device_name() == "Cuda"
@pytest.mark.parametrize(
"property_value",
[-1, 0, 1, 1234567890, -1.0, -0.1, 0.1, 1.0, 1234.0, "hello", "world", "onnxruntime"],
)
def test_add_get_property(property_value):
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
_ = Module(training_model_file_path, state)
# Float values in python are double precision.
# Convert to float32 to match the type of the property.
if isinstance(property_value, float):
property_value = float(np.float32(property_value))
state["property"] = property_value
assert "property" in state
assert state["property"] == property_value
CheckpointState.save_checkpoint(state, checkpoint_file_path)
new_state = CheckpointState.load_checkpoint(checkpoint_file_path)
assert "property" in new_state
assert new_state["property"] == property_value
def test_get_input_output_names():
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
eval_model_file_path,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, eval_model_file_path)
training_model = onnx.load(training_model_file_path)
assert model.input_names() == [input.name for input in training_model.graph.input][:2]
assert model.output_names() == [output.name for output in training_model.graph.output][:1]
def test_ort_custom_ops():
def _create_custom_op_trainable_onnx_model():
"""This function takes in a pre generated custom op model and adds a trainable linear layer to it"""
onnx_model = onnx.load(os.path.join("testdata", "custom_op_library", "custom_op_test.onnx"))
onnx_model.graph.value_info.append(
onnx.helper.make_tensor_value_info("output_1", onnx.TensorProto.FLOAT, [3, 5])
)
class CustomOpBlockWithLinear(onnxblock.ForwardBlock):
def __init__(self):
super().__init__()
self.linear = onnxblock.blocks.Linear(5, 10)
def build(self, linear_input):
return self.linear(linear_input)
custom_op_block = CustomOpBlockWithLinear()
with onnxblock.base(onnx_model) as model_accessor:
model_accessor.model.opset_import.append(onnx.helper.make_opsetid("test.customop", 1))
model_accessor.model.opset_import.append(onnx.helper.make_opsetid("", 14))
model_accessor.model.ir_version = 7
_ = custom_op_block("output_1")
return custom_op_block.to_model_proto()
onnx_model = _create_custom_op_trainable_onnx_model()
custom_op_library = os.path.join(os.getcwd(), "libcustom_op_library.so")
with tempfile.TemporaryDirectory() as temp_dir:
artifacts.generate_artifacts(
onnx_model,
optimizer=artifacts.OptimType.AdamW,
loss=artifacts.LossType.CrossEntropyLoss,
requires_grad=[param.name for param in onnx_model.graph.initializer],
artifact_directory=temp_dir,
custom_op_library=custom_op_library,
)
session_options = SessionOptions()
session_options.register_custom_ops_library(custom_op_library)
training_model_file_path = pathlib.Path(temp_dir) / "training_model.onnx"
eval_model_file_path = pathlib.Path(temp_dir) / "eval_model.onnx"
checkpoint_file_path = pathlib.Path(temp_dir) / "checkpoint"
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
# The custom op library is built either for cuda or cpu (but not for both).
# Since the training api pipeline build uses cuda, we need to specify the device as cuda.
# Otherwise the custom op library will not have the required kernels.
model = Module(
training_model_file_path, state, eval_model_file_path, device="cuda", session_options=session_options
)
x = np.random.randn(3, 5).astype(np.float32)
y = np.random.randn(3, 5).astype(np.float32)
labels = np.random.randint(0, 10, size=(3,), dtype=np.int64)
_ = model(x, y, labels)
def test_string_inputs():
def _create_string_input_trainable_model():
"""This function creates an onnx model with string inputs"""
class BlockWithStringInputs(onnxblock.ForwardBlock):
def __init__(self):
super().__init__()
self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)
self.linear = onnxblock.blocks.Linear(4, 2)
def build(self, string_input):
return self.linear(self.cast(string_input))
string_block = BlockWithStringInputs()
with onnxblock.empty_base() as model_accessor:
model_accessor.model.graph.input.extend(
[
onnx.helper.make_tensor_value_info("input", onnx.TensorProto.STRING, [1, 4]),
]
)
_ = string_block("input")
return string_block.to_model_proto()
onnx_model = _create_string_input_trainable_model()
with tempfile.TemporaryDirectory() as temp_dir:
artifacts.generate_artifacts(
onnx_model,
optimizer=artifacts.OptimType.AdamW,
loss=artifacts.LossType.CrossEntropyLoss,
requires_grad=[param.name for param in onnx_model.graph.initializer],
artifact_directory=temp_dir,
)
training_model_file_path = pathlib.Path(temp_dir) / "training_model.onnx"
eval_model_file_path = pathlib.Path(temp_dir) / "eval_model.onnx"
checkpoint_file_path = pathlib.Path(temp_dir) / "checkpoint"
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, eval_model_file_path)
strs = np.array([["1.0", "2.0", "3.0", "4.0"]], dtype=str)
labels = np.random.randint(0, 2, size=(1,), dtype=np.int64)
model.train()
_ = model(strs, labels)
model.eval()
_ = model(strs, labels)
def test_train_step_with_ort_values():
# Generating random data for testing.
inputs_np = torch.randn(64, 784).numpy()
inputs = OrtValue.ortvalue_from_numpy(inputs_np)
labels_np = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
labels = OrtValue.ortvalue_from_numpy(labels_np)
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
_,
_,
pt_model,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state)
model.train()
ort_loss = model(inputs, labels)
assert isinstance(ort_loss, OrtValue)
# Calculate loss using pytorch model to compare it with Module's output.
pt_outputs = pt_model(torch.from_numpy(inputs_np))
loss_fn = torch.nn.CrossEntropyLoss()
pt_loss = loss_fn(pt_outputs, torch.from_numpy(labels_np).long())
assert np.allclose(ort_loss.numpy(), pt_loss.detach().numpy())
def test_eval_step_with_ort_values():
# Generating random data for testing.
inputs_np = torch.randn(64, 784).numpy()
inputs = OrtValue.ortvalue_from_numpy(inputs_np)
labels_np = torch.randint(high=10, size=(64,), dtype=torch.int64).numpy()
labels = OrtValue.ortvalue_from_numpy(labels_np)
with tempfile.TemporaryDirectory() as temp_dir:
(
checkpoint_file_path,
training_model_file_path,
eval_model_file_path,
_,
_,
) = _create_training_artifacts(temp_dir)
# Create Checkpoint State.
state = CheckpointState.load_checkpoint(checkpoint_file_path)
# Create a Module.
model = Module(training_model_file_path, state, eval_model_file_path)
model.train()
model(inputs, labels)
model.eval()
fetches = model(inputs, labels)
assert isinstance(fetches, OrtValue)
assert fetches
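# A minimal end-to-end sketch combining the pieces exercised above
# (hypothetical training loop; paths as produced by _create_training_artifacts):
#
# state = CheckpointState.load_checkpoint(checkpoint_file_path)
# model = Module(training_model_file_path, state)
# optimizer = Optimizer(optimizer_model_file_path, model)
# model.train()
# for batch_inputs, batch_labels in batches:
#     loss = model(batch_inputs, batch_labels)
#     optimizer.step()
#     model.lazy_reset_grad()  # clear gradients before the next step
# CheckpointState.save_checkpoint(state, checkpoint_save_path)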
|
d3bb24ed175bb6b7cfecd8efb76b4404d42e1da6
|
ce267742f0513584f897e5016daa9d698c7460b5
|
/plasmapy/simulation/particletracker.py
|
25339a742940d832b41220eaf169a3167be6e85b
|
[
"BSD-3-Clause"
] |
permissive
|
PlasmaPy/PlasmaPy
|
6a84db38a6ec05abe2eb8426d426d585193e628f
|
9b8c641c81a622b0ec1d75f5113c42aa1897495b
|
refs/heads/main
| 2023-09-01T12:23:21.722165
| 2023-08-30T22:23:41
| 2023-08-30T22:23:41
| 46,810,954
| 539
| 346
|
BSD-3-Clause
| 2023-09-11T16:01:55
| 2015-11-24T18:36:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,930
|
py
|
particletracker.py
|
"""
Class representing a group of particles.
.. attention::
|expect-api-changes|
"""
__all__ = ["ParticleTracker"]
import astropy.units as u
import numpy as np
import scipy.interpolate as interp
from astropy import constants
from plasmapy.particles import atomic
from plasmapy.simulation import particle_integrators
from plasmapy.utils.decorators import validate_quantities
class ParticleTracker:
"""
Object representing a species of particles: ions, electrons, or simply
a group of particles with a particular initial velocity distribution.
.. attention::
|expect-api-changes|
Parameters
----------
plasma : Plasma object
Plasma from which fields can be pulled.
type : `str`
Particle type. See `plasmapy.particles.particle_class.ParticleLike`
for suitable arguments. The default is a proton.
n_particles : `int`
Number of macroparticles. The default is a single particle.
scaling : `float`
Number of particles represented by each macroparticle.
The default is 1, which means a :math:`1:1` correspondence between particles
and macroparticles.
dt : `astropy.units.Quantity`
Duration of timestep.
nt : `int`
Number of timesteps.
Attributes
----------
x : `astropy.units.Quantity`
Current position. Shape (n, 3).
v : `astropy.units.Quantity`
Current velocity. Shape (n, 3).
position_history : `astropy.units.Quantity`
History of position. Shape (nt, n, 3).
velocity_history : `astropy.units.Quantity`
History of velocity. Shape (nt, n, 3).
q : `astropy.units.Quantity`
Charge of particle.
m : `astropy.units.Quantity`
Mass of particle.
eff_q : `astropy.units.Quantity`
Total charge of macroparticle.
eff_m : `astropy.units.Quantity`
Total mass of macroparticle.
Examples
--------
See `Particle Stepper Notebook`_.
.. _`Particle Stepper Notebook`: ../notebooks/simulation/particle_stepper.ipynb
"""
integrators = {"explicit_boris": particle_integrators.boris_push}
_wip_integrators = {}
_all_integrators = dict(**integrators, **_wip_integrators)
@validate_quantities(dt=u.s)
def __init__(
self,
plasma,
particle_type="p",
n_particles=1,
scaling=1,
dt=np.inf * u.s,
nt=np.inf,
integrator="explicit_boris",
):
if np.isinf(dt) and np.isinf(nt): # coverage: ignore
raise ValueError("Both dt and nt are infinite.")
self.q = atomic.charge_number(particle_type) * constants.e.si
self.m = atomic.particle_mass(particle_type)
self.N = int(n_particles)
self.scaling = scaling
self.eff_q = self.q * scaling
self.eff_m = self.m * scaling
self.plasma = plasma
self.dt = dt
self.NT = int(nt)
self.t = np.arange(nt) * dt
self.x = np.zeros((n_particles, 3), dtype=float) * u.m
self.v = np.zeros((n_particles, 3), dtype=float) * (u.m / u.s)
self.name = particle_type
self.position_history = np.zeros((self.NT, *self.x.shape), dtype=float) * u.m
self.velocity_history = np.zeros((self.NT, *self.v.shape), dtype=float) * (
u.m / u.s
)
# create intermediate array of dimension (nx,ny,nz,3) in order to allow
# interpolation on non-equal spatial domain dimensions
_B = np.moveaxis(self.plasma.magnetic_field.si.value, 0, -1)
_E = np.moveaxis(self.plasma.electric_field.si.value, 0, -1)
self.integrator = self._all_integrators[integrator]
self._B_interpolator = interp.RegularGridInterpolator(
(self.plasma.x.si.value, self.plasma.y.si.value, self.plasma.z.si.value),
_B,
method="linear",
bounds_error=True,
)
self._E_interpolator = interp.RegularGridInterpolator(
(self.plasma.x.si.value, self.plasma.y.si.value, self.plasma.z.si.value),
_E,
method="linear",
bounds_error=True,
)
def _interpolate_fields(self):
interpolated_b = self._B_interpolator(self.x.si.value) * u.T
interpolated_e = self._E_interpolator(self.x.si.value) * u.V / u.m
return interpolated_b, interpolated_e
@property
def kinetic_energy_history(self):
r"""
Calculate the kinetic energy history for each particle.
Returns
-------
`~astropy.units.Quantity`
Array of kinetic energies, shape (nt, n).
"""
return (self.velocity_history**2).sum(axis=-1) * self.eff_m / 2
def boris_push(self, init=False):
r"""
Implement the Boris algorithm for moving particles and updating their
velocities.
Parameters
----------
init : `bool`, optional
If `True`, does not change the particle positions and sets ``dt``
to ``-dt/2``.
Notes
-----
        The Boris algorithm :cite:p:`boris:1970` is the standard
        energy-conserving algorithm for particle movement in plasma physics.
        See pages 58–63 of :cite:t:`birdsall:2004` for more details.
Conceptually, the algorithm has three phases:
1. Add half the impulse from electric field.
2. Rotate the particle velocity about the direction of the magnetic
field.
3. Add the second half of the impulse from the electric field.
This ends up causing the magnetic field action to be properly
"centered" in time, and the algorithm conserves energy.
"""
b, e = self._interpolate_fields()
if init:
self.integrator(
self.x.copy(),
self.v,
b,
e,
self.q,
self.m,
-0.5 * self.dt,
) # we don't want to change position here
else:
self.integrator(
self.x,
self.v,
b,
e,
self.q,
self.m,
self.dt,
)
def run(self):
r"""
Run a simulation instance.
"""
self.boris_push(init=True)
self.position_history[0] = self.x
self.velocity_history[0] = self.v
for i in range(1, self.NT):
self.boris_push()
self.position_history[i] = self.x
self.velocity_history[i] = self.v
def __repr__(self, *args, **kwargs) -> str:
return (
f"Species(q={self.q:.4e},m={self.m:.4e},N={self.N},"
f'name="{self.name}",NT={self.NT})'
)
    def __str__(self) -> str:  # coverage: ignore
        # `saved_iterations` was never defined on this class; report the
        # history length instead.
        return (
            f"{self.N} {self.scaling:.2e}-{self.name} with "
            f"q = {self.q:.2e}, m = {self.m:.2e}, "
            f"{self.position_history.shape[0]} saved history "
            f"steps over {self.NT} iterations"
        )
def plot_trajectories(self): # coverage: ignore
r"""Draw trajectory history."""
import matplotlib.pyplot as plt
from astropy.visualization import quantity_support
quantity_support()
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for p_index in range(self.N):
r = self.position_history[:, p_index]
x, y, z = r.T
ax.plot(x, y, z)
ax.set_title(self.name)
ax.set_xlabel("$x$ position")
ax.set_ylabel("$y$ position")
ax.set_zlabel("$z$ position")
plt.show()
def plot_time_trajectories(self, plot="xyz"): # coverage: ignore
r"""
Draw position history versus time.
Parameters
----------
plot : `str`, optional
Enable plotting of position component x, y, z for each of these
letters included in ``plot``.
"""
import matplotlib.pyplot as plt
from astropy.visualization import quantity_support
quantity_support()
fig, ax = plt.subplots()
for p_index in range(self.N):
r = self.position_history[:, p_index]
x, y, z = r.T
if "x" in plot:
ax.plot(self.t, x, label=f"x_{p_index}")
if "y" in plot:
ax.plot(self.t, y, label=f"y_{p_index}")
if "z" in plot:
ax.plot(self.t, z, label=f"z_{p_index}")
ax.set_title(self.name)
ax.legend(loc="best")
ax.grid()
plt.show()
def test_kinetic_energy(self):
r"""Test conservation of kinetic energy."""
assert np.allclose(
self.kinetic_energy_history,
self.kinetic_energy_history.mean(),
atol=3 * self.kinetic_energy_history.std(),
), "Kinetic energy is not conserved!"
|