| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
da6aa8182c706c593f77cdfe96acc5fc18c46609
|
783bcccb13591e80b439e29782ecb977ae67c1f1
|
/testing/local-binder-k8s-hub/install-jupyterhub-chart
|
afa9214e29efa08166d95ac50a576c52e06f3aeb
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
jupyterhub/binderhub
|
3ff86c5e896e68c17c6fd000f1426837237cb186
|
84b1db907335d5e0307222fbfcb6de77a98db8a2
|
refs/heads/main
| 2023-08-29T14:10:35.419954
| 2023-08-02T08:28:21
| 2023-08-02T08:28:21
| 89,419,368
| 2,422
| 420
|
BSD-3-Clause
| 2023-09-05T09:08:40
| 2017-04-26T00:28:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,031
|
install-jupyterhub-chart
|
#!/usr/bin/env python3
"""
Makes a standalone installation of the JupyterHub Helm chart of the version
specified in the BinderHub Helm chart's Chart.yaml file, and uses the
configuration for the JupyterHub Helm chart nested in the BinderHub Helm chart's
configuration.
"""
import os
import sys
from subprocess import check_call
from tempfile import NamedTemporaryFile
from ruamel.yaml import YAML
yaml = YAML()
here = os.path.abspath(os.path.dirname(__file__))
helm_chart = os.path.join(here, os.pardir, os.pardir, "helm-chart")
def _get_jupyterhub_dependency_version():
"""
Extract JupyterHub Helm chart version from the BinderHub chart's
Chart.yaml file that lists its chart dependencies.
"""
chart_yaml = os.path.join(helm_chart, "binderhub", "Chart.yaml")
with open(chart_yaml) as f:
dependencies = yaml.load(f)
for dep in dependencies["dependencies"]:
if dep["name"] == "jupyterhub":
return dep["version"]
else:
raise ValueError(
f"JupyterHub as a Helm chart dependency not found in {chart_yaml}:\n{dependencies}"
)
with NamedTemporaryFile(mode="w") as tmp:
with open(os.path.join(helm_chart, "binderhub", "values.yaml")) as values_in:
jupyterhub_chart_config = yaml.load(values_in)["jupyterhub"]
yaml.dump(jupyterhub_chart_config, tmp.file)
tmp.flush()
cmd = ["helm", "upgrade", "--install", "binderhub-test"]
cmd.extend(
[
"jupyterhub",
"--repo=https://jupyterhub.github.io/helm-chart/",
f"--version={_get_jupyterhub_dependency_version()}",
f"--values={tmp.name}",
f'--values={os.path.join(here, "jupyterhub-chart-config.yaml")}',
]
)
if "--auth" in sys.argv:
cmd.extend(
[
f'--values={os.path.join(here, "jupyterhub-chart-config-auth-additions.yaml")}'
]
)
print("Installing the JupyterHub Helm chart by itself")
print(" ".join(cmd))
check_call(cmd)
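# Typical invocation (a sketch, assuming helm is on PATH and kubectl already
# points at the local test cluster; --auth is the only flag the script itself
# inspects):
#
#   ./testing/local-binder-k8s-hub/install-jupyterhub-chart
#   ./testing/local-binder-k8s-hub/install-jupyterhub-chart --auth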
|
|
2b0d9eda0ae1291e2b6f87e3aa40e3eccc865c9d
|
011ff3bff551529ce95ab0f115ea51dc02400c9e
|
/paystackapi/tests/test_cpanel.py
|
46998a40fcf09738fa4c3f7600c0fa278d2bcd1c
|
[
"MIT"
] |
permissive
|
andela-sjames/paystack-python
|
05ecd5622917c4ec80f6ba7ea91a1c145a48df12
|
bcc55546c6aacd6e27596f114d2ad9e6d14295af
|
refs/heads/master
| 2023-07-09T12:49:09.898397
| 2023-06-19T13:47:44
| 2023-06-19T13:47:44
| 51,199,027
| 110
| 44
|
MIT
| 2023-06-28T18:14:32
| 2016-02-06T10:53:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
test_cpanel.py
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.cpanel import ControlPanel
class TestPage(BaseTestCase):
@httpretty.activate
def test_fetch_payment_session_timeout(self):
"""Method defined to test fetch payment session timeout."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/integration/payment_session_timeout"),
content_type='text/json',
body='{"status": true, "message": "Payment session timeout retrieved"}',
status=201,
)
response = ControlPanel.fetch_payment_session_timeout()
self.assertTrue(response['status'])
@httpretty.activate
def test_update_payment_session_timeout(self):
"""Method defined to test update payment session timeout."""
httpretty.register_uri(
httpretty.PUT,
self.endpoint_url("/integration/payment_session_timeout"),
content_type='text/json',
body='{"status": true, "message": "Payment session timeout updated"}',
status=201,
)
response = ControlPanel.update_payment_session_timeout(timeout=30)
self.assertTrue(response['status'])
|
55099eac6337caee301840f879862a545ffc60bb
|
c4cdad5ab1fbd5829bd75c1cb7e796f104d7352a
|
/tests/test_core.py
|
4b90f4b916c6e5f8f7b306f5c324e9b16bb09261
|
[
"MIT"
] |
permissive
|
sampsyo/wideq
|
7bd2eab6d4b86eb8c7661e8f97e1d7a644cac5e8
|
511e342e5d3d5c3ae3c18d8fef7f922bf4c1f43c
|
refs/heads/master
| 2023-08-31T18:31:52.839607
| 2022-05-12T19:33:07
| 2022-05-12T19:34:41
| 116,520,474
| 325
| 162
|
MIT
| 2022-03-20T08:32:23
| 2018-01-06T22:34:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
test_core.py
|
import unittest
import responses
import wideq.core
class SimpleTest(unittest.TestCase):
@responses.activate
def test_gateway_en_US(self):
responses.add(
responses.POST,
"https://kic.lgthinq.com:46030/api/common/gatewayUriList",
json={
"lgedmRoot": {
"thinqUri": "https://aic.lgthinq.com:46030/api",
"empUri": "https://us.m.lgaccount.com",
"oauthUri": "https://us.lgeapi.com",
"countryCode": "US",
"langCode": "en-US",
}
},
)
gatewayInstance = wideq.core.Gateway.discover("US", "en-US")
self.assertEqual(len(responses.calls), 1)
self.assertEqual(gatewayInstance.country, "US")
self.assertEqual(gatewayInstance.language, "en-US")
self.assertEqual(
gatewayInstance.auth_base, "https://us.m.lgaccount.com"
)
self.assertEqual(
gatewayInstance.api_root, "https://aic.lgthinq.com:46030/api"
)
self.assertEqual(gatewayInstance.oauth_root, "https://us.lgeapi.com")
@responses.activate
def test_gateway_en_NO(self):
responses.add(
responses.POST,
"https://kic.lgthinq.com:46030/api/common/gatewayUriList",
json={
"lgedmRoot": {
"countryCode": "NO",
"langCode": "en-NO",
"thinqUri": "https://eic.lgthinq.com:46030/api",
"empUri": "https://no.m.lgaccount.com",
"oauthUri": "https://no.lgeapi.com",
}
},
)
gatewayInstance = wideq.core.Gateway.discover("NO", "en-NO")
self.assertEqual(len(responses.calls), 1)
self.assertEqual(gatewayInstance.country, "NO")
self.assertEqual(gatewayInstance.language, "en-NO")
self.assertEqual(
gatewayInstance.auth_base, "https://no.m.lgaccount.com"
)
self.assertEqual(
gatewayInstance.api_root, "https://eic.lgthinq.com:46030/api"
)
self.assertEqual(gatewayInstance.oauth_root, "https://no.lgeapi.com")
|
3d0aac741b0a203dfd9b90a70d5d71b86dae96ec
|
4b1b3cf75b4582bbaa7b0f80e4d833cb98d9c5c8
|
/src/canmatrix/j1939_decoder.py
|
3cd03c6f439cdf47c1975b4db5f2349d7906ab46
|
[
"BSD-2-Clause"
] |
permissive
|
ebroecker/canmatrix
|
e16ff9c4c8b9e67e75ec84c3de24c78708bda2ca
|
ac1a2378f084f90f3ea9da4b3f21491f3ea05c3a
|
refs/heads/development
| 2023-09-04T19:08:53.231876
| 2023-08-07T21:55:12
| 2023-08-07T21:55:12
| 9,304,497
| 835
| 398
|
BSD-2-Clause
| 2023-08-07T21:55:14
| 2013-04-08T19:08:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,440
|
py
|
j1939_decoder.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from builtins import *
import attr
import canmatrix.formats
try:
from importlib.resources import read_binary
except ImportError:
from pkgutil import get_data as read_binary
@attr.s
class j1939_decoder(object):
string = read_binary(__name__.rpartition('.')[0], "j1939.dbc")
j1939_db = canmatrix.formats.loads_flat(
string, import_type="dbc", dbcImportEncoding="utf8"
)
length = attr.ib(default=0) # type: int
count_succesive_frames = attr.ib(default=0) # type: int
transfered_pgn = attr.ib(default=0) # type: int
_data = attr.ib(init=False, default=bytearray())
def decode(self, arbitration_id, can_data, matrix = None):
if matrix is not None:
frame = matrix.frame_by_pgn(arbitration_id.pgn)
else:
frame = None
if frame is not None:
return ("regular " + frame.name, frame.decode(can_data))
elif self.j1939_db.frame_by_pgn(arbitration_id.pgn) is not None:
signals = self.j1939_db.decode(arbitration_id,can_data)
frame_name = self.j1939_db.frame_by_pgn(arbitration_id.pgn).name
return ("J1939 known: " + frame_name, signals)
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xECFF).pgn and can_data[0] == 32:
# BAM detected
self.length = (int(can_data[2]) << 8) + int(can_data[1])
self.count_succesive_frames = int(can_data[3])
self.transfered_pgn = (int(can_data[7]) << 16) + (int(can_data[6]) << 8) + int(can_data[5])
self.bytes_left = self.length
self._data = bytearray()
return ("BAM ", {})
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xECFF).pgn and can_data[0] == 16:
# RTS detected
self.length = (int(can_data[2]) << 8) + int(can_data[1])
self.count_of_packets = int(can_data[3])
self.total_count_of_packet_sent = int(can_data[4])
self.transfered_pgn = (int(can_data[7]) << 16) + (int(can_data[6]) << 8) + int(can_data[5])
return ("ERROR - decoding RTS not yet implemented")
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xECFF).pgn and can_data[0] == 17:
# CTS detected
self.max_packets_at_once = can_data[1]
self.sequence_number_to_start = can_data[2]
self.transfered_pgn = (int(can_data[7]) << 16) + (int(can_data[6]) << 8) + int(can_data[5])
return ("ERROR - decoding CTS not yet implemented")
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xECFF).pgn and can_data[0] == 19:
# ACK detected
self.message_size = (int(can_data[2]) << 8) + int(can_data[1])
self.count_of_packets = int(can_data[3])
self.transfered_pgn = (int(can_data[7]) << 16) + (int(can_data[6]) << 8) + int(can_data[5])
return ("ERROR - decoding ACK not yet implemented")
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xECFF).pgn and can_data[0] == 255:
# Connection Abort
self.abort_reason = can_data[1]
self.transfered_pgn = (int(can_data[7]) << 16) + (int(can_data[6]) << 8) + int(can_data[5])
return ("ERROR - decoding Connection Abbort not yet implemented")
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xEEFF).pgn:
#Address Claimed
#arbitration_id.j1939_source
#name in can_data[0:8]
return ("ERROR - address claim detected not yet implemented")
pass
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xEBFF).pgn:
# transfer data
self._data = self._data + can_data[1:min(8, self.bytes_left + 1)]
self.bytes_left = max(self.bytes_left - 7, 0)
if self.count_succesive_frames == 0:
#print(self._data)
frame = matrix.frame_by_pgn(self.transfered_pgn)
if frame is not None:
signals = frame.decode(self._data)
return ("BAM last data", signals)
return ("BAM last data", {})
else:
self.count_succesive_frames -= 1
return ("BAM data ", {})
return ("",{})
|
d7025672ae5697917cb91b85f5c5d8fd4c3e24ef
|
49600905e4aaa4929758997c5d1df09ff693534a
|
/njunmt/encoders/rnn_encoder.py
|
7f3a1c717eab3cd9b1ead10c30cd97ca2d9afa7d
|
[
"Apache-2.0"
] |
permissive
|
zhaocq-nlp/NJUNMT-tf
|
3466d967cdc96b2dc6b0fb6a3e769ec1b83010d2
|
01155c740705f1641ebf3134829cea0e212f2d28
|
refs/heads/v0.6
| 2018-12-04T18:55:36.641444
| 2018-01-26T06:39:42
| 2018-01-26T06:39:42
| 115,672,915
| 114
| 44
|
Apache-2.0
| 2018-01-27T13:54:31
| 2017-12-29T01:17:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,852
|
py
|
rnn_encoder.py
|
# Copyright 2017 Natural Language Processing Group, Nanjing University, zhaocq.nlp@gmail.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Define RNN-based encoders. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from njunmt.encoders.encoder import Encoder
from njunmt.utils.rnn_cell_utils import get_multilayer_rnn_cells
class StackBidirectionalRNNEncoder(Encoder):
""" Define stacked bidirectional RNN encoder. """
def __init__(self, params, mode, name=None, verbose=True):
""" Initializes the parameters of the encoder.
Args:
params: A dictionary of parameters to construct the
encoder architecture.
mode: A mode.
name: The name of this encoder.
verbose: Print encoder parameters if set True.
"""
super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name, verbose)
self._cells_fw = get_multilayer_rnn_cells(**self.params['rnn_cell'])
self._cells_bw = get_multilayer_rnn_cells(**self.params['rnn_cell'])
@staticmethod
def default_params():
""" Returns a dictionary of default parameters of this encoder. """
return {
"rnn_cell": {
"cell_class": "LSTMCell",
"cell_params": {
"num_units": 1024,
},
"dropout_input_keep_prob": 1.0,
"dropout_state_keep_prob": 1.0,
"num_layers": 1
}
}
def encode(self, feature_ids, feature_length, input_modality, **kwargs):
""" Encodes the inputs via a stacked bi-directional RNN.
Args:
feature_ids: A Tensor, [batch_size, max_features_length].
feature_length: A Tensor, [batch_size, ].
input_modality: An instance of `Modality`.
**kwargs:
Returns: An instance of `collections.namedtuple`.
"""
with tf.variable_scope(input_modality.name):
inputs = input_modality.bottom(feature_ids)
scope = self.name
if "scope" in kwargs:
scope = kwargs.pop("scope")
# outputs: [batch_size, max_time, layers_output]
# layers_output = size_of_fw + size_of_bw
# the returned states:
# `tuple` type which has only one item, because we use MultiRNN cell for multiple cells
outputs, states_fw, states_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw=[self._cells_fw],
cells_bw=[self._cells_bw],
inputs=inputs,
sequence_length=feature_length,
dtype=tf.float32,
scope=scope,
**kwargs)
# because we use MultiRNNCell, unpack the top tuple structure
states_fw = states_fw[0]
states_bw = states_bw[0]
return self._encoder_output_tuple_type(
outputs=outputs,
final_states={
"forward": states_fw[-1],
"backward": states_bw[-1]},
attention_values=outputs,
attention_length=feature_length)
class UnidirectionalRNNEncoder(Encoder):
""" Define a unidirectional RNN encoder. """
def __init__(self, params, mode, name=None, verbose=True):
""" Initializes the parameters of the encoder.
Args:
params: A dictionary of parameters to construct the
encoder architecture.
mode: A mode.
name: The name of this encoder.
verbose: Print encoder parameters if set True.
"""
super(UnidirectionalRNNEncoder, self).__init__(params, mode, name, verbose)
self._cells_fw = get_multilayer_rnn_cells(**self.params['rnn_cell'])
@staticmethod
def default_params():
""" Returns a dictionary of default parameters of this encoder. """
return {
"rnn_cell": {
"cell_class": "LSTMCell",
"cell_params": {
"num_units": 1024,
},
"dropout_input_keep_prob": 1.0,
"dropout_state_keep_prob": 1.0,
"num_layers": 1
}
}
def encode(self, feature_ids, feature_length, input_modality, **kwargs):
""" Encodes the inputs.
Args:
feature_ids: A Tensor, [batch_size, max_features_length].
feature_length: A Tensor, [batch_size, ].
input_modality: An instance of `Modality`.
**kwargs:
Returns: An instance of `collections.namedtuple`.
"""
with tf.variable_scope(input_modality.name):
inputs = input_modality.bottom(feature_ids)
scope = self.name
if "scope" in kwargs:
scope = kwargs.pop("scope")
# outputs: [batch_size, max_time, num_units_of_hidden]
outputs, states = tf.nn.dynamic_rnn(
cell=self._cells_fw,
inputs=inputs,
sequence_length=feature_length,
dtype=tf.float32,
scope=scope,
**kwargs)
return self._encoder_output_tuple_type(
outputs=outputs,
final_states=states[-1],
attention_values=outputs,
attention_length=feature_length)
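# Construction sketch (illustrative only; `mode`, `feature_ids`, `feature_length`
# and `input_modality` come from the surrounding NJUNMT-tf training code, which
# is not shown here, and the params dict follows the same shape as default_params()):
#
#   encoder = UnidirectionalRNNEncoder(
#       params={"rnn_cell": {"cell_class": "LSTMCell",
#                            "cell_params": {"num_units": 512},
#                            "dropout_input_keep_prob": 0.8,
#                            "dropout_state_keep_prob": 1.0,
#                            "num_layers": 2}},
#       mode=mode)
#   encoder_output = encoder.encode(feature_ids, feature_length, input_modality)
#   # encoder_output.outputs has shape [batch_size, max_time, 512]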
|
b721bfac00adac69a38ea9931e4ae89d6604ae8e
|
5c363c50c54175a982330ec888401b3e394373ab
|
/benchmarking/nursery/benchmark_multiobjective/benchmark_definitions.py
|
eb269cb0a4bb8ec6da5b68bcf84dc896ef4a780c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
awslabs/syne-tune
|
b14fb008f63def6a172bea6cc451f4e1906647f5
|
c35686e1b5947d45384fd1d41a44e013da53ef43
|
refs/heads/main
| 2023-08-14T14:21:48.995716
| 2023-08-03T12:57:13
| 2023-08-03T12:57:13
| 417,499,108
| 313
| 47
|
Apache-2.0
| 2023-09-14T14:06:54
| 2021-10-15T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
benchmark_definitions.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from syne_tune.experiments.benchmark_definitions import (
SurrogateBenchmarkDefinition,
)
def fcnet_mo_benchmark(dataset_name):
return SurrogateBenchmarkDefinition(
max_wallclock_time=int(1e26),
n_workers=1,
max_num_evaluations=1000,
elapsed_time_attr="metric_elapsed_time",
metric=["metric_valid_loss", "metric_n_params"],
mode=["min", "min"],
blackbox_name="fcnet",
dataset_name=dataset_name,
)
fcnet_mo_benchmark_definitions = {
"fcnet-protein": fcnet_mo_benchmark("protein_structure"),
"fcnet-naval": fcnet_mo_benchmark("naval_propulsion"),
"fcnet-parkinsons": fcnet_mo_benchmark("parkinsons_telemonitoring"),
"fcnet-slice": fcnet_mo_benchmark("slice_localization"),
}
def nas201_mo_benchmark(dataset_name):
return SurrogateBenchmarkDefinition(
max_wallclock_time=int(1e26),
n_workers=1,
max_num_evaluations=400 * 200,
elapsed_time_attr="metric_elapsed_time",
metric=["metric_valid_error", "metric_latency"],
mode=["min", "min"],
blackbox_name="nasbench201",
dataset_name=dataset_name,
max_resource_attr="epochs",
)
nas201_mo_benchmark_definitions = {
"nas201-mo-cifar10": nas201_mo_benchmark("cifar10"),
"nas201-mo-cifar100": nas201_mo_benchmark("cifar100"),
"nas201-mo-ImageNet16-120": nas201_mo_benchmark("ImageNet16-120"),
}
benchmark_definitions = {
**nas201_mo_benchmark_definitions,
**fcnet_mo_benchmark_definitions,
}
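# Minimal lookup sketch (illustrative; assumes SurrogateBenchmarkDefinition
# exposes its constructor arguments as attributes):
#
#   benchmark = benchmark_definitions["nas201-mo-cifar10"]
#   print(benchmark.blackbox_name)            # nasbench201
#   print(benchmark.metric, benchmark.mode)   # ['metric_valid_error', 'metric_latency'] ['min', 'min']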
|
e7834ece43f9eefd7db7d4cba0fb33cf6f30113c
|
38d1c6a920b3d9534f191fa0bfcd6d7d4625d643
|
/my/instagram/common.py
|
36c6b83a018cec0bc5b04a4b39ef9a7b8ce0f652
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
karlicoss/HPI
|
ab87dcbfd8e3af788144990c35e030577227966b
|
c283e542e3457ecd778fb09e54e725d67104a49a
|
refs/heads/master
| 2023-08-31T11:38:23.547022
| 2023-08-24T22:29:14
| 2023-08-24T22:46:23
| 209,134,309
| 1,252
| 65
|
MIT
| 2023-09-07T01:36:27
| 2019-09-17T18:59:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
common.py
|
from dataclasses import replace
from datetime import datetime
from itertools import chain
from typing import Iterator, Dict, Any, Protocol
from my.core import warn_if_empty, Res
class User(Protocol):
id: str
username: str
full_name: str
class Message(Protocol):
created: datetime
text: str
thread_id: str
# property because it's more mypy friendly
@property
def user(self) -> User: ...
@warn_if_empty
def _merge_messages(*sources: Iterator[Res[Message]]) -> Iterator[Res[Message]]:
# TODO double check it works w.r.t. naive/aware timestamps?
def key(r: Res[Message]):
if isinstance(r, Exception):
# NOTE: using str() against Exception is nice so exceptions with same args are treated the same..
return str(r)
dt = r.created
# seems that GDPR has millisecond resolution.. so best to strip them off when merging
round_us = dt.microsecond // 1000 * 1000
without_us = r.created.replace(microsecond=round_us)
# using text as key is a bit crap.. but atm there are no better shared fields
return (without_us, r.text)
# ugh. seems that GDPR thread ids are completely uncorrelated to any android ids (tried searching over all sqlite dump)
# so the only way to correlate is to try and match messages
# we also can't use unique_everseen here, otherwise will never get a chance to unify threads
mmap: Dict[str, Message] = {}
thread_map = {}
user_map = {}
for m in chain(*sources):
if isinstance(m, Exception):
yield m
continue
k = key(m)
mm = mmap.get(k)
if mm is not None:
# already emitted, we get a chance to populate mappings
if m.thread_id not in thread_map:
thread_map[m.thread_id] = mm.thread_id
if m.user.id not in user_map:
user_map[m.user.id] = mm.user
else:
# not emitted yet, need to emit
repls: Dict[str, Any] = {}
tid = thread_map.get(m.thread_id)
if tid is not None:
repls['thread_id'] = tid
user = user_map.get(m.user.id)
if user is not None:
repls['user'] = user
if len(repls) > 0:
m = replace(m, **repls) # type: ignore[type-var, misc] # ugh mypy is confused because of Protocol?
mmap[k] = m
yield m
|
8206242c23e27ba1d4c2e273b4e953d182d140f5
|
fd8ef75bb06383538cdb21ed2a0ef88e570179b7
|
/src/openfermion/ops/operators/symbolic_operator_test.py
|
2c5691ce1c2d29b33521ee6c97b054fcdb030edc
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
quantumlib/OpenFermion
|
d1147383f99573d19005bd0f3e0120e9e9bed04c
|
788481753c798a72c5cb3aa9f2aa9da3ce3190b0
|
refs/heads/master
| 2023-09-04T11:00:32.124157
| 2023-08-24T21:54:30
| 2023-08-24T21:54:30
| 104,403,768
| 1,481
| 406
|
Apache-2.0
| 2023-08-24T21:54:31
| 2017-09-21T22:10:28
|
Python
|
UTF-8
|
Python
| false
| false
| 46,765
|
py
|
symbolic_operator_test.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests symbolic_operator.py."""
import copy
import unittest
import warnings
import numpy
import sympy
from openfermion.config import EQ_TOLERANCE
from openfermion.testing.testing_utils import EqualsTester
from openfermion.ops.operators.symbolic_operator import SymbolicOperator
class DummyOperator1(SymbolicOperator):
"""Subclass of SymbolicOperator created for testing purposes."""
@property
def actions(self):
"""The allowed actions."""
return (1, 0)
@property
def action_strings(self):
"""The string representations of the allowed actions."""
return ('^', '')
@property
def action_before_index(self):
"""Whether action comes before index in string representations."""
return False
@property
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
return False
class DummyOperator2(SymbolicOperator):
"""Subclass of SymbolicOperator created for testing purposes."""
@property
def actions(self):
"""The allowed actions."""
return ('X', 'Y', 'Z')
@property
def action_strings(self):
"""The string representations of the allowed actions."""
return ('X', 'Y', 'Z')
@property
def action_before_index(self):
"""Whether action comes before index in string representations."""
return True
@property
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
return True
class GeneralTest(unittest.TestCase):
"""General tests."""
def test_symbolic_operator_is_abstract_cant_instantiate(self):
with self.assertRaises(TypeError):
_ = SymbolicOperator()
def test_symbolic_operator_constant(self):
op = DummyOperator1((), 1.723)
self.assertEqual(op.constant, 1.723)
op = DummyOperator1('1^ 4', 0.182)
self.assertEqual(op.constant, 0.0)
def test_init_single_factor(self):
"""Test initialization of the form DummyOperator((index, action))."""
equals_tester = EqualsTester(self)
group_1 = [DummyOperator1((3, 0)), DummyOperator1(((3, 0),))]
group_2 = [DummyOperator2((5, 'X')), DummyOperator2(((5, 'X'),))]
group_3 = [
DummyOperator2((5, 'X'), .5),
DummyOperator2(((5, 'X'),), .5)
]
equals_tester.add_equality_group(*group_1)
equals_tester.add_equality_group(*group_2)
equals_tester.add_equality_group(*group_3)
def test_eq_and_ne(self):
"""Test == and !=."""
equals_tester = EqualsTester(self)
zeros_1 = [
DummyOperator1(),
DummyOperator1('1^ 0', 0.),
DummyOperator1('1^ 0', -1j) * 0,
DummyOperator1('1^ 0', 0 * sympy.Symbol('x')),
DummyOperator1('1^ 0', sympy.Symbol('x')) * 0
]
zeros_2 = [
DummyOperator2(),
DummyOperator2(((1, 'Y'), (0, 'X')), 0.),
DummyOperator2(((1, 'Y'), (0, 'X')), -1j) * 0,
DummyOperator2(((1, 'Y'), (0, 'X')), 0 * sympy.Symbol('x')),
DummyOperator2(((1, 'Y'), (0, 'X')), sympy.Symbol('x')) * 0
]
different_ops_1 = [
DummyOperator1(((1, 0),), -0.1j),
DummyOperator1(((1, 1),), -0.1j),
(DummyOperator1(((1, 0),), -0.1j) + DummyOperator1(
((1, 1),), -0.1j))
]
different_ops_2 = [
DummyOperator2(((1, 'Y'),), -0.1j),
DummyOperator2(((1, 'X'),), -0.1j),
(DummyOperator2(((1, 'Y'),), -0.1j) + DummyOperator2(
((2, 'Y'),), -0.1j))
]
sympy_ops_1 = [
DummyOperator1('1^ 0', sympy.Symbol('x')),
DummyOperator1('1^ 0', 2 * sympy.Symbol('x')) / 2,
DummyOperator1('1^ 0',
sympy.Symbol('x') * sympy.Symbol('y')) * 1 /
sympy.Symbol('y')
]
sympy_ops_2 = [DummyOperator1('1^ 0', sympy.Symbol('x') + 1)]
equals_tester.add_equality_group(*sympy_ops_2)
equals_tester.add_equality_group(*zeros_1)
equals_tester.add_equality_group(*zeros_2)
equals_tester.add_equality_group(*sympy_ops_1)
for op in different_ops_1:
equals_tester.add_equality_group(op)
for op in different_ops_2:
equals_tester.add_equality_group(op)
def test_many_body_order(self):
"""Test computing the many-body order."""
zero = DummyOperator1()
identity = DummyOperator2(())
op1 = DummyOperator1('0^ 3 5^ 6')
op2 = op1 + DummyOperator1('8^ 3')
op3 = op2 + DummyOperator1(u'1^ 2 3^ 4 5 ')
op4 = DummyOperator2('X0 X1 Y3')
op5 = op4 - DummyOperator2('Z0')
op6 = op5 - DummyOperator2('Z1 Z2 Y3 Y4 Y9 Y10')
op7 = op5 - DummyOperator2('Z1 Z2 Y3 Y4 Y9 Y10', EQ_TOLERANCE / 2.)
self.assertEqual(zero.many_body_order(), 0)
self.assertEqual(identity.many_body_order(), 0)
self.assertEqual(op1.many_body_order(), 4)
self.assertEqual(op2.many_body_order(), 4)
self.assertEqual(op3.many_body_order(), 5)
self.assertEqual(op4.many_body_order(), 3)
self.assertEqual(op5.many_body_order(), 3)
self.assertEqual(op6.many_body_order(), 6)
self.assertEqual(op7.many_body_order(), 3)
def test_iter(self):
op1 = DummyOperator1('0^ 3 5^ 6')
op2 = DummyOperator1('8^ 3')
opsum = op1 + op2
op_list = []
for op_term in opsum:
op_list.append(op_term)
self.assertEqual(len(op_list), 2)
self.assertEqual(op_list[0], op1)
self.assertEqual(op_list[1], op2)
class SymbolicOperatorTest1(unittest.TestCase):
"""Test the subclass DummyOperator1."""
def test_init_defaults(self):
loc_op = DummyOperator1()
self.assertEqual(len(loc_op.terms), 0)
def test_init_tuple_real_coefficient(self):
loc_op = ((0, 1), (5, 0), (6, 1))
coefficient = 0.5
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_tuple_complex_coefficient(self):
loc_op = ((0, 1), (5, 0), (6, 1))
coefficient = 0.6j
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_tuple_npfloat64_coefficient(self):
loc_op = ((0, 1), (5, 0), (6, 1))
coefficient = numpy.float64(2.303)
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_tuple_npcomplex128_coefficient(self):
loc_op = ((0, 1), (5, 0), (6, 1))
coefficient = numpy.complex128(-1.123j + 43.7)
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_list_real_coefficient(self):
loc_op = [(0, 1), (5, 0), (6, 1)]
coefficient = 1. / 3
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_list_complex_coefficient(self):
loc_op = [(0, 1), (5, 0), (6, 1)]
coefficient = 2j / 3.
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_list_npfloat64_coefficient(self):
loc_op = [(0, 1), (5, 0), (6, 1)]
coefficient = numpy.float64(2.3037)
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_init_list_npcomplex128_coefficient(self):
loc_op = [(0, 1), (5, 0), (6, 1)]
coefficient = numpy.complex128(-1.1237j + 43.37)
fermion_op = DummyOperator1(loc_op, coefficient)
self.assertEqual(len(fermion_op.terms), 1)
self.assertEqual(fermion_op.terms[tuple(loc_op)], coefficient)
def test_identity_is_multiplicative_identity(self):
u = DummyOperator1.identity()
f = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
g = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
h = f + g
self.assertTrue(f == u * f)
self.assertTrue(f == f * u)
self.assertTrue(g == u * g)
self.assertTrue(g == g * u)
self.assertTrue(h == u * h)
self.assertTrue(h == h * u)
u *= h
self.assertTrue(h == u)
self.assertFalse(f == u)
# Method always returns new instances.
self.assertFalse(DummyOperator1.identity() == u)
def test_zero_is_additive_identity(self):
o = DummyOperator1.zero()
f = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
g = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
h = f + g
self.assertTrue(f == o + f)
self.assertTrue(f == f + o)
self.assertTrue(g == o + g)
self.assertTrue(g == g + o)
self.assertTrue(h == o + h)
self.assertTrue(h == h + o)
o += h
self.assertTrue(h == o)
self.assertFalse(f == o)
# Method always returns new instances.
self.assertFalse(DummyOperator1.zero() == o)
def test_zero_is_multiplicative_nil(self):
o = DummyOperator1.zero()
u = DummyOperator1.identity()
f = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
g = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
self.assertTrue(o == o * u)
self.assertTrue(o == o * f)
self.assertTrue(o == o * g)
self.assertTrue(o == o * (f + g))
def test_init_str(self):
fermion_op = DummyOperator1('0^ 5 12^', -1.)
correct = ((0, 1), (5, 0), (12, 1))
self.assertIn(correct, fermion_op.terms)
self.assertEqual(fermion_op.terms[correct], -1.0)
def test_init_long_str_repeated(self):
fermion_op = DummyOperator1('-2 [0^ 1] + [0^ 1]')
correct = -1 * DummyOperator1('0^ 1')
self.assertTrue(fermion_op == correct)
def test_raises_error_negative_indices(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('X-1 Y0')
with self.assertRaises(ValueError):
_ = DummyOperator1('-1^ 0')
def test_init_long_str(self):
fermion_op = DummyOperator1(
'(-2.0+3.0j) [0^ 1] +\n\n -1.0[ 2^ 3 ] - []', -1.)
correct = \
DummyOperator1('0^ 1', complex(2., -3.)) + \
DummyOperator1('2^ 3', 1.) + \
DummyOperator1('', 1.)
self.assertEqual(len((fermion_op - correct).terms), 0)
reparsed_op = DummyOperator1(str(fermion_op))
self.assertEqual(len((fermion_op - reparsed_op).terms), 0)
fermion_op = DummyOperator1('1.7 [3^ 2] - 8 [4^]')
correct = DummyOperator1('3^ 2', 1.7) + DummyOperator1('4^', -8.)
self.assertEqual(len((fermion_op - correct).terms), 0)
fermion_op = DummyOperator1('-(2.3 + 1.7j) [3^ 2]')
correct = DummyOperator1('3^ 2', complex(-2.3, -1.7))
self.assertEqual(len((fermion_op - correct).terms), 0)
def test_merges_multiple_whitespace(self):
fermion_op = DummyOperator1(' \n ')
self.assertEqual(fermion_op.terms, {(): 1})
def test_init_str_identity(self):
fermion_op = DummyOperator1('')
self.assertIn((), fermion_op.terms)
def test_init_with_sympy(self):
fermion_op = DummyOperator1('0^', sympy.Symbol('x'))
self.assertEqual(fermion_op.terms[((0, 1),)], sympy.Symbol('x'))
def test_init_bad_term(self):
with self.assertRaises(ValueError):
DummyOperator1(2)
def test_init_bad_coefficient(self):
with self.assertRaises(ValueError):
DummyOperator1('0^', "0.5")
def test_init_bad_action_str(self):
with self.assertRaises(ValueError):
DummyOperator1('0-')
def test_init_bad_action_tuple(self):
with self.assertRaises(ValueError):
DummyOperator1(((0, 2),))
def test_init_bad_tuple(self):
with self.assertRaises(ValueError):
DummyOperator1(((0, 1, 1),))
def test_init_bad_str(self):
with self.assertRaises(ValueError):
DummyOperator1('^')
def test_init_bad_mode_num(self):
with self.assertRaises(ValueError):
DummyOperator1('-1^')
def test_init_invalid_tensor_factor(self):
with self.assertRaises(ValueError):
DummyOperator1(((-2, 1), (1, 0)))
def test_DummyOperator1(self):
op = DummyOperator1((), 3.)
self.assertTrue(op == DummyOperator1(()) * 3.)
def test_imul_inplace(self):
fermion_op = DummyOperator1("1^")
prev_id = id(fermion_op)
fermion_op *= 3.
self.assertEqual(id(fermion_op), prev_id)
self.assertEqual(fermion_op.terms[((1, 1),)], 3.)
def test_imul_scalar_real(self):
loc_op = ((1, 0), (2, 1))
multiplier = 0.5
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertEqual(fermion_op.terms[loc_op], multiplier)
def test_imul_scalar_complex(self):
loc_op = ((1, 0), (2, 1))
multiplier = 0.6j
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertEqual(fermion_op.terms[loc_op], multiplier)
def test_imul_sympy(self):
loc_op = ((1, 0), (2, 1))
multiplier = sympy.Symbol('x')
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertTrue(fermion_op.terms[loc_op] - multiplier == 0)
def test_imul_sympy_2(self):
loc_op = ((1, 0), (2, 1))
multiplier = sympy.Symbol('x') + 3
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertTrue(fermion_op.terms[loc_op] - multiplier == 0)
def test_imul_sympy_ops(self):
loc_op1 = ((1, 0), (2, 1))
coeff1 = sympy.Symbol('x') + 3
loc_op2 = ((1, 1), (3, 1))
coeff2 = sympy.Symbol('x') + 5
fermion_op = DummyOperator1(loc_op1, coeff1)
fermion_op *= DummyOperator1(loc_op2, coeff2)
self.assertTrue(fermion_op.terms[loc_op1 + loc_op2] -
coeff1 * coeff2 == 0)
def test_imul_scalar_npfloat64(self):
loc_op = ((1, 0), (2, 1))
multiplier = numpy.float64(2.303)
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertEqual(fermion_op.terms[loc_op], multiplier)
def test_imul_scalar_npcomplex128(self):
loc_op = ((1, 0), (2, 1))
multiplier = numpy.complex128(-1.123j + 1.7911)
fermion_op = DummyOperator1(loc_op)
fermion_op *= multiplier
self.assertEqual(fermion_op.terms[loc_op], multiplier)
def test_imul_fermion_op(self):
op1 = DummyOperator1(((0, 1), (3, 0), (8, 1), (8, 0), (11, 1)), 3.j)
op2 = DummyOperator1(((1, 1), (3, 1), (8, 0)), 0.5)
op1 *= op2
correct_term = ((0, 1), (3, 0), (8, 1), (8, 0), (11, 1), (1, 1), (3, 1),
(8, 0))
self.assertEqual(len(op1.terms), 1)
self.assertIn(correct_term, op1.terms)
def test_imul_fermion_op_2(self):
op3 = DummyOperator1(((1, 1), (0, 0)), -1j)
op4 = DummyOperator1(((1, 0), (0, 1), (2, 1)), -1.5)
op3 *= op4
op4 *= op3
self.assertIn(((1, 1), (0, 0), (1, 0), (0, 1), (2, 1)), op3.terms)
self.assertEqual(op3.terms[((1, 1), (0, 0), (1, 0), (0, 1), (2, 1))],
1.5j)
def test_imul_fermion_op_duplicate_term(self):
op1 = DummyOperator1('1 2 3')
op1 += DummyOperator1('1 2')
op1 += DummyOperator1('1')
op2 = DummyOperator1('3')
op2 += DummyOperator1('2 3')
op1 *= op2
self.assertAlmostEqual(op1.terms[((1, 0), (2, 0), (3, 0))], 2.)
def test_imul_bidir(self):
op_a = DummyOperator1(((1, 1), (0, 0)), -1j)
op_b = DummyOperator1(((1, 1), (0, 1), (2, 1)), -1.5)
op_a *= op_b
op_b *= op_a
self.assertIn(((1, 1), (0, 0), (1, 1), (0, 1), (2, 1)), op_a.terms)
self.assertEqual(op_a.terms[((1, 1), (0, 0), (1, 1), (0, 1), (2, 1))],
1.5j)
self.assertIn(
((1, 1), (0, 1), (2, 1), (1, 1), (0, 0), (1, 1), (0, 1), (2, 1)),
op_b.terms)
self.assertEqual(
op_b.terms[((1, 1), (0, 1), (2, 1), (1, 1), (0, 0), (1, 1), (0, 1),
(2, 1))], -2.25j)
def test_imul_bad_multiplier(self):
op = DummyOperator1(((1, 1), (0, 1)), -1j)
with self.assertRaises(TypeError):
op *= "1"
def test_mul_by_scalarzero(self):
op = DummyOperator1(((1, 1), (0, 1)), -1j) * 0
self.assertNotIn(((0, 1), (1, 1)), op.terms)
self.assertIn(((1, 1), (0, 1)), op.terms)
self.assertEqual(op.terms[((1, 1), (0, 1))], 0.0)
def test_mul_bad_multiplier(self):
op = DummyOperator1(((1, 1), (0, 1)), -1j)
with self.assertRaises(TypeError):
op = op * "0.5"
def test_mul_sympy_coeff(self):
op = DummyOperator1(((1, 1), (0, 1)), -1j)
op = op * sympy.Symbol('x')
self.assertTrue(op.terms[((1, 1),
(0, 1))] - (-1j * sympy.Symbol('x')) == 0)
def test_mul_out_of_place(self):
op1 = DummyOperator1(((0, 1), (3, 1), (3, 0), (11, 1)), 3.j)
op2 = DummyOperator1(((1, 1), (3, 1), (8, 0)), 0.5)
op3 = op1 * op2
correct_coefficient = 3.0j * 0.5
correct_term = ((0, 1), (3, 1), (3, 0), (11, 1), (1, 1), (3, 1), (8, 0))
self.assertTrue(op1 == DummyOperator1(((0, 1), (3, 1), (3, 0),
(11, 1)), 3.j))
self.assertTrue(op2 == DummyOperator1(((1, 1), (3, 1), (8, 0)), 0.5))
self.assertTrue(
op3 == DummyOperator1(correct_term, correct_coefficient))
def test_mul_npfloat64(self):
op = DummyOperator1(((1, 0), (3, 1)), 0.5)
res = op * numpy.float64(0.5)
self.assertTrue(res == DummyOperator1(((1, 0), (3, 1)), 0.5 * 0.5))
def test_mul_multiple_terms(self):
op = DummyOperator1(((1, 0), (8, 1)), 0.5)
op += DummyOperator1(((1, 1), (9, 1)), 1.4j)
res = op * op
correct = DummyOperator1(((1, 0), (8, 1), (1, 0), (8, 1)), 0.5**2)
correct += (DummyOperator1(
((1, 0), (8, 1), (1, 1), (9, 1)), 0.7j) + DummyOperator1(
((1, 1), (9, 1), (1, 0), (8, 1)), 0.7j))
correct += DummyOperator1(((1, 1), (9, 1), (1, 1), (9, 1)), 1.4j**2)
self.assertTrue(res == correct)
def test_rmul_scalar_real(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = 0.5
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1 == res2)
def test_rmul_scalar_complex(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = 0.6j
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1 == res2)
def test_rmul_scalar_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = numpy.float64(2.303)
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1 == res2)
def test_rmul_scalar_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
multiplier = numpy.complex128(-1.5j + 7.7)
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1 == res2)
def test_rmul_bad_multiplier(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op = "0.5" * op
def test_truediv_and_div_real(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.5
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res == correct)
# Test if done out of place
self.assertTrue(op == original)
def test_truediv_and_div_complex(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.6j
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res == correct)
# Test if done out of place
self.assertTrue(op == original)
def test_truediv_and_div_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.float64(2.303)
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res == correct)
# Test if done out of place
self.assertTrue(op == original)
def test_truediv_and_div_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.complex128(566.4j + 0.3)
original = copy.deepcopy(op)
res = op / divisor
correct = op * (1. / divisor)
self.assertTrue(res == correct)
# Test if done out of place
self.assertTrue(op == original)
def test_truediv_bad_divisor(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op = op / "0.5"
def test_itruediv_and_idiv_real(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.5
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op == correct)
# Test if done in-place
self.assertFalse(op == original)
def test_itruediv_and_idiv_complex(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = 0.6j
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op == correct)
# Test if done in-place
self.assertFalse(op == original)
def test_itruediv_and_idiv_npfloat64(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.float64(2.3030)
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op == correct)
# Test if done in-place
self.assertFalse(op == original)
def test_itruediv_and_idiv_npcomplex128(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
divisor = numpy.complex128(12.3 + 7.4j)
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
self.assertTrue(op == correct)
# Test if done in-place
self.assertFalse(op == original)
def test_itruediv_bad_divisor(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
with self.assertRaises(TypeError):
op /= "0.5"
def test_iadd_different_term(self):
term_a = ((1, 1), (3, 0), (8, 1))
term_b = ((1, 1), (3, 1), (8, 0))
a = DummyOperator1(term_a, 1.0)
a += DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], 0.5)
a += DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], 1.0)
def test_iadd_sympy(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
term_b = ((1, 1), (3, 1), (8, 0))
coeff_b = sympy.Symbol('b')
a = DummyOperator1(term_a, coeff_a)
a += DummyOperator1(term_b, coeff_b)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] - coeff_b == 0)
a += DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] - coeff_b - 0.5 == 0)
def test_add_sympy(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
term_b = ((1, 1), (3, 1), (8, 0))
coeff_b = sympy.Symbol('b')
a = DummyOperator1(term_a, coeff_a)
a = a + DummyOperator1(term_b, coeff_b)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] - coeff_b == 0)
a = a + DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] - coeff_b - 0.5 == 0)
def test_radd(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = 1
a = DummyOperator1(term_a, coeff_a)
b = 2 + a
self.assertTrue(b.constant == 2)
def test_sum_list(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = 1
term_b = ((1, 1), (3, 1), (8, 0))
coeff_b = 2
a = DummyOperator1(term_a, coeff_a)
b = DummyOperator1(term_b, coeff_b)
aplusb1 = sum([a, b])
aplusb2 = a + b
self.assertEqual(aplusb1, aplusb2)
def test_rsub(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = 1
a = DummyOperator1(term_a, coeff_a)
b = 2 - a
self.assertTrue(b.constant == 2)
b = b - 2
self.assertEqual(b, -1 * a)
def test_iadd_sympy_term_removal(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
a = DummyOperator1(term_a, coeff_a)
a += DummyOperator1(term_a, -coeff_a)
self.assertEqual(len(a.terms), 0)
def test_iadd_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
op += "0.5"
def test_add(self):
term_a = ((1, 1), (3, 0), (8, 1))
term_b = ((1, 0), (3, 0), (8, 1))
a = DummyOperator1(term_a, 1.0)
b = DummyOperator1(term_b, 0.5)
res = a + b + b
self.assertEqual(len(res.terms), 2)
self.assertEqual(res.terms[term_a], 1.0)
self.assertEqual(res.terms[term_b], 1.0)
# Test out of place
self.assertTrue(a == DummyOperator1(term_a, 1.0))
self.assertTrue(b == DummyOperator1(term_b, 0.5))
def test_add_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
_ = op + "0.5"
def test_sub(self):
term_a = ((1, 1), (3, 1), (8, 1))
term_b = ((1, 0), (3, 1), (8, 1))
a = DummyOperator1(term_a, 1.0)
b = DummyOperator1(term_b, 0.5)
res = a - b
self.assertEqual(len(res.terms), 2)
self.assertEqual(res.terms[term_a], 1.0)
self.assertEqual(res.terms[term_b], -0.5)
res2 = b - a
self.assertEqual(len(res2.terms), 2)
self.assertEqual(res2.terms[term_a], -1.0)
self.assertEqual(res2.terms[term_b], 0.5)
def test_sub_bad_subtrahend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
_ = op - "0.5"
def test_sub_sympy(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
term_b = ((1, 1), (3, 1), (8, 0))
coeff_b = sympy.Symbol('b')
a = DummyOperator1(term_a, coeff_a)
a = a - DummyOperator1(term_b, coeff_b)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] + coeff_b == 0)
a = a - DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] + coeff_b + 0.5 == 0)
def test_isub_different_term(self):
term_a = ((1, 1), (3, 1), (8, 0))
term_b = ((1, 0), (3, 1), (8, 1))
a = DummyOperator1(term_a, 1.0)
a -= DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], -0.5)
a -= DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertEqual(a.terms[term_a], 1.0)
self.assertEqual(a.terms[term_b], -1.0)
def test_isub_bad_addend(self):
op = DummyOperator1((), 1.0)
with self.assertRaises(TypeError):
op -= "0.5"
def test_isub_sympy(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
term_b = ((1, 1), (3, 1), (8, 0))
coeff_b = sympy.Symbol('b')
a = DummyOperator1(term_a, coeff_a)
a -= DummyOperator1(term_b, coeff_b)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] + coeff_b == 0)
a -= DummyOperator1(term_b, 0.5)
self.assertEqual(len(a.terms), 2)
self.assertTrue(a.terms[term_a] - coeff_a == 0)
self.assertTrue(a.terms[term_b] + coeff_b + 0.5 == 0)
def test_isub_sympy_term_removal(self):
term_a = ((1, 1), (3, 0), (8, 1))
coeff_a = sympy.Symbol('a')
a = DummyOperator1(term_a, coeff_a)
a -= DummyOperator1(term_a, coeff_a)
self.assertEqual(len(a.terms), 0)
def test_neg(self):
op = DummyOperator1(((1, 1), (3, 1), (8, 1)), 0.5)
_ = -op
# out of place
self.assertTrue(op == DummyOperator1(((1, 1), (3, 1), (8, 1)), 0.5))
correct = -1.0 * op
self.assertTrue(correct == -op)
def test_pow_square_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
squared = term**2
expected = DummyOperator1(ops + ops, coeff**2)
self.assertTrue(squared == term * term)
self.assertTrue(squared == expected)
def test_pow_zero_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
zerod = term**0
expected = DummyOperator1(())
self.assertTrue(expected == zerod)
def test_pow_one_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
self.assertTrue(term == term**1)
def test_pow_high_term(self):
coeff = 6.7j
ops = ((3, 1), (1, 0), (4, 1))
term = DummyOperator1(ops, coeff)
high = term**10
expected = DummyOperator1(ops * 10, coeff**10)
self.assertTrue(expected == high)
def test_pow_neg_error(self):
with self.assertRaises(ValueError):
_ = DummyOperator1()**-1
def test_pow_nonint_error(self):
with self.assertRaises(ValueError):
_ = DummyOperator1('3 2^')**0.5
def test_compress_terms(self):
op = (DummyOperator1('3^ 1', 0.3 + 3e-11j) +
DummyOperator1('2^ 3', 5e-10) + DummyOperator1('1^ 3', 1e-3))
op_compressed = (DummyOperator1('3^ 1', 0.3) +
DummyOperator1('1^ 3', 1e-3))
op.compress(1e-7)
self.assertTrue(op_compressed == op)
def test_compress_sympy(self):
op = (DummyOperator1('',
sympy.Symbol('x') + sympy.Symbol('y')) +
DummyOperator1('3^ 1',
sympy.Symbol('x') + 1e-7 - sympy.Symbol('x')))
op_compressed = DummyOperator1('',
sympy.Symbol('x') + sympy.Symbol('y'))
op.compress(1e-6)
self.assertTrue(op_compressed == op)
def test_str_sympy(self):
op = DummyOperator1("0^", sympy.Symbol('x'))
self.assertEqual(str(op), "x [0^]")
def test_str(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
self.assertEqual(str(op), "0.5 [1^ 3 8^]")
op = DummyOperator1((), 2)
self.assertEqual(str(op), "2 []")
op = DummyOperator1()
self.assertEqual(str(op), "0")
op = (DummyOperator1(((3, 1), (4, 1), (5, 0)), 1.0) + DummyOperator1(
((3, 1), (4, 1), (4, 0)), 2.0) + DummyOperator1(
((2, 1), (4, 1), (5, 0)), 1.0) + DummyOperator1(
((3, 0), (2, 1), (1, 1)), 2.0) + DummyOperator1(
((3, 0), (2, 0), (1, 1)), 2.0))
self.assertEqual(
str(op).strip(), """
1.0 [2^ 4^ 5] +
2.0 [3 2 1^] +
2.0 [3 2^ 1^] +
2.0 [3^ 4^ 4] +
1.0 [3^ 4^ 5]
""".strip())
op = (DummyOperator1(((3, 1), (4, 1), (5, 0)), 0.0) + DummyOperator1(
((3, 1), (4, 1), (4, 0)), 2.0))
self.assertEqual(str(op).strip(), """
2.0 [3^ 4^ 4]
""".strip())
def test_rep(self):
op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
# Not necessary, repr could do something in addition
self.assertEqual(repr(op), str(op))
class SymbolicOperatorTest2(unittest.TestCase):
"""Test the subclass DummyOperator2."""
def test_init_defaults(self):
loc_op = DummyOperator2()
self.assertTrue(len(loc_op.terms) == 0)
def test_init_tuple(self):
coefficient = 0.5
loc_op = ((0, 'X'), (5, 'Y'), (6, 'Z'))
qubit_op = DummyOperator2(loc_op, coefficient)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(qubit_op.terms[loc_op] == coefficient)
def test_init_list(self):
coefficient = 0.6j
loc_op = [(0, 'X'), (5, 'Y'), (6, 'Z')]
qubit_op = DummyOperator2(loc_op, coefficient)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(qubit_op.terms[tuple(loc_op)] == coefficient)
def test_init_str(self):
qubit_op = DummyOperator2('X0 Y5 Z12', -1.)
correct = ((0, 'X'), (5, 'Y'), (12, 'Z'))
self.assertTrue(correct in qubit_op.terms)
self.assertTrue(qubit_op.terms[correct] == -1.0)
def test_init_long_str(self):
qubit_op = DummyOperator2(
'(-2.0+3.0j) [X0 Y1] +\n\n -1.0[ X2 Y3 ] - []', -1.)
correct = \
DummyOperator2('X0 Y1', complex(2., -3.)) + \
DummyOperator2('X2 Y3', 1.) + \
DummyOperator2('', 1.)
self.assertEqual(len((qubit_op - correct).terms), 0)
reparsed_op = DummyOperator2(str(qubit_op))
self.assertEqual(len((qubit_op - reparsed_op).terms), 0)
qubit_op = DummyOperator2('[X0 X1] + [Y0 Y1]')
correct = DummyOperator2('X0 X1') + DummyOperator2('Y0 Y1')
self.assertTrue(qubit_op == correct)
self.assertTrue(qubit_op == DummyOperator2(str(qubit_op)))
def test_init_long_str_sympy(self):
coeff = sympy.Symbol('x')
qubit_op = DummyOperator2(
'(-2.0+3.0j) [X0 Y1] +\n\n -1.0[ X2 Y3 ] - []', -coeff)
correct = \
DummyOperator2('X0 Y1', complex(2., -3.) * coeff) + \
DummyOperator2('X2 Y3', coeff) + \
DummyOperator2('', coeff)
self.assertEqual(len((qubit_op - correct).terms), 0)
with self.assertRaises(ValueError):
_ = DummyOperator2(str(qubit_op))
def test_init_long_str_sympy_failure(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('(x^) [X0 Y1]', -1)
def test_init_str_identity(self):
qubit_op = DummyOperator2('', 2.)
self.assertTrue(len(qubit_op.terms) == 1)
self.assertTrue(() in qubit_op.terms)
self.assertAlmostEqual(qubit_op.terms[()], 2.)
def test_init_bad_term(self):
with self.assertRaises(ValueError):
_ = DummyOperator2(2)
def test_init_bad_coefficient(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('X0', "0.5")
def test_init_bad_action(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('Q0')
def test_init_bad_action_in_tuple(self):
with self.assertRaises(ValueError):
_ = DummyOperator2(((1, 'Q'),))
def test_init_bad_qubit_num_in_tuple(self):
with self.assertRaises(ValueError):
_ = DummyOperator2((("1", 'X'),))
def test_init_bad_tuple(self):
with self.assertRaises(ValueError):
_ = DummyOperator2(((0, 1, 'X'),))
def test_init_bad_str(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('X')
def test_init_bad_qubit_num(self):
with self.assertRaises(ValueError):
_ = DummyOperator2('X-1')
def test_compress(self):
a = DummyOperator2('X0', .9e-12)
self.assertTrue(len(a.terms) == 1)
a.compress()
self.assertTrue(len(a.terms) == 0)
a = DummyOperator2('X0', 1. + 1j)
a.compress(.5)
self.assertTrue(len(a.terms) == 1)
for term in a.terms:
self.assertTrue(a.terms[term] == 1. + 1j)
a = DummyOperator2('X0', 1.1 + 1j)
a.compress(1.)
self.assertTrue(len(a.terms) == 1)
for term in a.terms:
self.assertTrue(a.terms[term] == 1.1)
a = DummyOperator2('X0', 1.1 + 1j) + DummyOperator2('X1', 1.e-6j)
a.compress()
self.assertTrue(len(a.terms) == 2)
for term in a.terms:
self.assertTrue(isinstance(a.terms[term], complex))
a.compress(1.e-5)
self.assertTrue(len(a.terms) == 1)
for term in a.terms:
self.assertTrue(isinstance(a.terms[term], complex))
a.compress(1.)
self.assertTrue(len(a.terms) == 1)
for term in a.terms:
self.assertTrue(isinstance(a.terms[term], float))
def test_rmul_scalar(self):
multiplier = 0.5
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
res1 = op * multiplier
res2 = multiplier * op
self.assertTrue(res1 == res2)
def test_rmul_sympy(self):
multiplier = sympy.Symbol('x') + 3
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
res1 = op * multiplier
res2 = multiplier * op
zero_op = DummyOperator2()
self.assertTrue(res1 - res2 == zero_op)
def test_rmul_bad_multiplier(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
with self.assertRaises(TypeError):
op = "0.5" * op
def test_truediv_and_div(self):
divisor = 0.6j
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
op2 = copy.deepcopy(op)
original = copy.deepcopy(op)
res = op / divisor
res2 = op2.__div__(divisor) # To test python 2 version as well
correct = op * (1. / divisor)
self.assertTrue(res == correct)
self.assertTrue(res2 == correct)
# Test if done out of place
self.assertTrue(op == original)
self.assertTrue(op2 == original)
def test_truediv_bad_divisor(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
with self.assertRaises(TypeError):
op = op / "0.5"
def test_itruediv_and_idiv(self):
divisor = 2
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
op2 = copy.deepcopy(op)
original = copy.deepcopy(op)
correct = op * (1. / divisor)
op /= divisor
op2.__idiv__(divisor) # To test python 2 version as well
self.assertTrue(op == correct)
self.assertTrue(op2 == correct)
# Test if done in-place
self.assertTrue(not op == original)
self.assertTrue(not op2 == original)
def test_itruediv_bad_divisor(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
with self.assertRaises(TypeError):
op /= "0.5"
def test_iadd_cancellation(self):
term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))
term_b = ((1, 'X'), (3, 'Y'), (8, 'Z'))
a = DummyOperator2(term_a, 1.0)
a += DummyOperator2(term_b, -1.0)
self.assertTrue(len(a.terms) == 0)
def test_iadd_different_term(self):
term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))
term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))
a = DummyOperator2(term_a, 1.0)
a += DummyOperator2(term_b, 0.5)
self.assertTrue(len(a.terms) == 2)
self.assertAlmostEqual(a.terms[term_a], 1.0)
self.assertAlmostEqual(a.terms[term_b], 0.5)
a += DummyOperator2(term_b, 0.5)
self.assertTrue(len(a.terms) == 2)
self.assertAlmostEqual(a.terms[term_a], 1.0)
self.assertAlmostEqual(a.terms[term_b], 1.0)
def test_iadd_bad_addend(self):
op = DummyOperator2((), 1.0)
with self.assertRaises(TypeError):
op += "0.5"
def test_add(self):
term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))
term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))
a = DummyOperator2(term_a, 1.0)
b = DummyOperator2(term_b, 0.5)
res = a + b + b
self.assertTrue(len(res.terms) == 2)
self.assertAlmostEqual(res.terms[term_a], 1.0)
self.assertAlmostEqual(res.terms[term_b], 1.0)
# Test out of place
self.assertTrue(a == DummyOperator2(term_a, 1.0))
self.assertTrue(b == DummyOperator2(term_b, 0.5))
def test_add_bad_addend(self):
op = DummyOperator2((), 1.0)
with self.assertRaises(TypeError):
op = op + "0.5"
def test_sub(self):
term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))
term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))
a = DummyOperator2(term_a, 1.0)
b = DummyOperator2(term_b, 0.5)
res = a - b
self.assertTrue(len(res.terms) == 2)
self.assertAlmostEqual(res.terms[term_a], 1.0)
self.assertAlmostEqual(res.terms[term_b], -0.5)
res2 = b - a
self.assertTrue(len(res2.terms) == 2)
self.assertAlmostEqual(res2.terms[term_a], -1.0)
self.assertAlmostEqual(res2.terms[term_b], 0.5)
def test_sub_bad_subtrahend(self):
op = DummyOperator2((), 1.0)
with self.assertRaises(TypeError):
op = op - "0.5"
def test_isub_different_term(self):
term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))
term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))
a = DummyOperator2(term_a, 1.0)
a -= DummyOperator2(term_b, 0.5)
self.assertTrue(len(a.terms) == 2)
self.assertAlmostEqual(a.terms[term_a], 1.0)
self.assertAlmostEqual(a.terms[term_b], -0.5)
a -= DummyOperator2(term_b, 0.5)
self.assertTrue(len(a.terms) == 2)
self.assertAlmostEqual(a.terms[term_a], 1.0)
self.assertAlmostEqual(a.terms[term_b], -1.0)
def test_isub_bad_addend(self):
op = DummyOperator2((), 1.0)
with self.assertRaises(TypeError):
op -= "0.5"
def test_neg(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
# out of place
self.assertTrue(op == DummyOperator2(((1, 'X'), (3, 'Y'),
(8, 'Z')), 0.5))
correct = -1.0 * op
self.assertTrue(correct == -op)
def test_str(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
self.assertEqual(str(op), "0.5 [X1 Y3 Z8]")
op2 = DummyOperator2((), 2)
self.assertEqual(str(op2), "2 []")
op3 = (DummyOperator2(
((3, 'X'), (4, 'Z'), (5, 'Y')), 3.0) + DummyOperator2(
((1, 'X'), (4, 'Z'), (4, 'Z')), 2.0) + DummyOperator2(
((2, 'Z'), (4, 'Z'), (5, 'X')), 1.0) + DummyOperator2(
((3, 'Y'), (2, 'Y'), (1, 'Z')), 2.0) + DummyOperator2(
((3, 'Y'), (2, 'Y'), (1, 'Y')), 2.0))
self.assertEqual(
str(op3).strip(), """
2.0 [X1 Z4 Z4] +
2.0 [Y1 Y2 Y3] +
2.0 [Z1 Y2 Y3] +
1.0 [Z2 Z4 X5] +
3.0 [X3 Z4 Y5]
""".strip())
def test_str_empty(self):
op = DummyOperator2()
self.assertEqual(str(op), '0')
def test_str_out_of_order(self):
op = DummyOperator2(((3, 'Y'), (1, 'X'), (8, 'Z')), 0.5)
self.assertEqual(str(op), '0.5 [X1 Y3 Z8]')
def test_str_multiple_terms(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
op += DummyOperator2(((1, 'Y'), (3, 'Y'), (8, 'Z')), 0.6)
self.assertTrue((str(op) == "0.5 [X1 Y3 Z8] +\n0.6 [Y1 Y3 Z8]" or
str(op) == "0.6 [Y1 Y3 Z8] +\n0.5 [X1 Y3 Z8]"))
op2 = DummyOperator2((), 2)
self.assertEqual(str(op2), "2 []")
def test_rep(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)
# Not necessary, repr could do something in addition
self.assertEqual(repr(op), str(op))
def test_norm(self):
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), 1)
op += DummyOperator2(((2, 'Z'), (3, 'Y')), 1)
self.assertAlmostEqual(op.induced_norm(2), numpy.sqrt(2.))
def test_norm_sympy(self):
x_sym = sympy.Symbol('x')
y_sym = sympy.Symbol('y')
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), x_sym)
op += DummyOperator2(((2, 'Z'), (3, 'Y')), y_sym)
norm = op.induced_norm(2)
self.assertTrue(norm - (abs(x_sym)**2 + abs(y_sym)**2)**(0.5) == 0)
def test_many_body_order_sympy(self):
x_sym = sympy.Symbol('x')
y_sym = sympy.Symbol('y')
op = DummyOperator2(((1, 'X'), (3, 'Y'), (8, 'Z')), x_sym)
op += DummyOperator2(((2, 'Z'), (3, 'Y')), y_sym)
self.assertEqual(op.many_body_order(), 3)
def test_tracenorm_zero(self):
op = DummyOperator2()
self.assertFalse(op.induced_norm())
|
54a4baca72fbe3521a3d8c7814cc9f30fd8ed249
|
a5b66100762c0ca7076de26645ef1b732e0ee2d8
|
/test_python_toolbox/test_combi/test_chain_space.py
|
7fbcc4174ed073d3b4ad6600736275495be6cd68
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
cool-RR/python_toolbox
|
63400bbc004c63b32fe421b668a64bede4928e90
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
refs/heads/master
| 2022-01-26T14:41:29.194288
| 2021-12-25T06:49:40
| 2021-12-25T06:49:40
| 3,066,283
| 130
| 15
|
NOASSERTION
| 2021-12-25T06:49:41
| 2011-12-29T01:39:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
test_chain_space.py
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
from python_toolbox import cute_testing
from python_toolbox.combi import *
def test_chain_spaces():
chain_space = ChainSpace((range(3), 'meow', range(22, 19, -1)))
assert tuple(chain_space) == (0, 1, 2, 'm', 'e', 'o', 'w', 22, 21, 20)
assert len(chain_space) == chain_space.length == 10
assert bool(chain_space) is True
for i, item in enumerate(chain_space):
assert chain_space[i] == item
assert chain_space.index(item) == i
assert chain_space == chain_space
assert 0 in chain_space
assert 'm' in chain_space
assert [] not in chain_space
with cute_testing.RaiseAssertor(ValueError): chain_space.index('nope')
with cute_testing.RaiseAssertor(IndexError): chain_space[-11]
with cute_testing.RaiseAssertor(IndexError): chain_space[-110]
with cute_testing.RaiseAssertor(IndexError): chain_space[11]
with cute_testing.RaiseAssertor(IndexError): chain_space[1100]
assert chain_space[-1] == 20
assert chain_space[-2] == 21
assert chain_space[-10] == 0
assert not ChainSpace(())
|
e05fbc4dd42b664795590c3e986b4e1cf279ca4d
|
11d0cc8470722b737ac1d83610f227ffb98ea71a
|
/old_code/lgf_vweb.py
|
ddd2ed7a53f95422197a6d4d9e3b3b54995f2770
|
[] |
no_license
|
EdoardoCarlesi/PyRCODIO
|
d86e5298877f08b597f92a07aac0c9b634dbfa39
|
d3f84efb7aeec0032ef6bde839fe9440ee82b392
|
refs/heads/master
| 2021-04-03T04:58:26.219884
| 2021-03-21T12:22:19
| 2021-03-21T12:22:19
| 125,036,704
| 148
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,979
|
py
|
lgf_vweb.py
|
from libio.read_ascii import *
import pickle
import numpy as np
import os
# Root path for the vweb files
dirBase='/z/carlesi/CLUES/DATA/512/'
#dirBase='/z/carlesi/CLUES/DATA/LGF/SNAPS/512/'
#dirBase='/work2/eduardo/DATA/512/VWEB/'
fileWeb1='vweb_lgf_054.000064.Vweb-ascii'
fileWeb2='vweb_127.000064.Vweb-ascii'
#fileLGs='saved/lgs_'
#'saved/lgs_r_5000.0_mMin4e+11_512_00_00.pkl'
fileLGs='saved/lgs_r_5000.0_mMin4e+11_512_'
#fileWeb='saved/lgf_'
fileEV='saved/lgs_evs_512_5000.pkl'
# The LGs used for the v-web will be attached here
fileSelectedLGs='saved/lgs_select_512_5000.pkl'
#fileEV='saved/lgs_evs_all.pkl'
#fileSelectedLGs='saved/lgs_select_all.pkl'
# Do a loop
iSta=0
iEnd=100
gSta=0
gEnd=30
# Main web parameters
boxSize=100000.0
gridSize=64
norm = 1.e+3
ev1 = []; ev2 = []; ev3 = []
lgs = []
# Main loop
for iRun in range(iSta, iEnd):
iRunStr = '%02d' % iRun
# Sub loop
for gRun in range(gSta, gEnd):
gRunStr = '%02d' % gRun
subRunStr = iRunStr + '_' + gRunStr
thisFileWeb1 = dirBase + subRunStr + '/' + fileWeb1
thisFileWeb2 = dirBase + subRunStr + '/' + fileWeb2
thisFileLGs = fileLGs + subRunStr + '.pkl'
# print(thisFileWeb)
# print(thisFileLGs)
# Check if files exist
exist1 = os.path.isfile(thisFileWeb1)
exist2 = os.path.isfile(thisFileWeb2)
exist3 = os.path.isfile(thisFileLGs)
exist0 = False
if exist1:
thisFileWeb = thisFileWeb1
exist0 = True
if exist2:
thisFileWeb = thisFileWeb2
exist0 = True
# If they do then read the vweb and the lg
if exist0 and exist3:
print('Found vWeb file: ', thisFileWeb)
f_lg = open(thisFileLGs, 'rb')
#f_web = open(thisFileWeb, 'rb')
thisLG = pickle.load(f_lg)
for lg in thisLG:
if lg.code != 'EMPTY':
thisWeb = read_vweb(thisFileWeb, gridSize, boxSize)
thisCOM = lg.get_com()
coord = []
for ix in range(0, 3):
jx = int(gridSize * thisCOM[ix] / boxSize)
coord.append(jx)
thisEV = thisWeb.evals[:, coord[0], coord[1], coord[2]]
if abs(thisEV[0]) < 0.001:
norm = 1.e+3
else:
norm = 1.0
if (thisEV[0] * norm > -10.00 and thisEV[2] < 10.035):
print(thisEV[0] * norm, thisEV[1] * norm, thisEV[2] * norm, norm)
ev1.append(thisEV[0] * norm)
ev2.append(thisEV[1] * norm)
ev3.append(thisEV[2] * norm)
lgs.append(thisLG)
evs = [ev1, ev2, ev3]
f_evs = open(fileEV, 'wb')
pickle.dump(evs, f_evs)
f_lgs = open(fileSelectedLGs, 'wb')
pickle.dump(lgs, f_lgs)
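
# A minimal, self-contained sketch of the centre-of-mass to grid-cell mapping used in the
# loop above, factored into a helper for clarity. The helper name and the example values
# below are illustrative assumptions, not part of the original pipeline.
def com_to_grid_cell(com, grid_size, box_size):
"""Map a 3D position (same units as box_size) to integer v-web grid indices."""
return [int(grid_size * c / box_size) for c in com]

# Example: a Local Group at (48.2, 51.7, 50.1) Mpc/h in a 100 Mpc/h box on a 64^3 grid
# lands in cell [30, 33, 32]:
# com_to_grid_cell([48200.0, 51700.0, 50100.0], 64, 100000.0) -> [30, 33, 32]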
|
e1ddcc0f0bdc731f2982634aa575ecd0004dd1ed
|
b43e0384ec4bfacec2571a2bb41ce563267db449
|
/jesse/modes/import_candles_mode/drivers/Binance/BinancePerpetualFuturesTestnet.py
|
fe418fa5af3d382ddf939c9afc68fd9bc5a5e3c5
|
[
"MIT"
] |
permissive
|
jesse-ai/jesse
|
55b73448b767492a20f8bc56c28306a1a24f8599
|
fadb03b5fcc06f0655c6a5d877435fb872a97c5e
|
refs/heads/master
| 2023-08-24T15:28:52.875208
| 2023-08-24T13:53:31
| 2023-08-24T13:53:31
| 156,847,937
| 5,259
| 722
|
MIT
| 2023-09-10T13:51:26
| 2018-11-09T10:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
BinancePerpetualFuturesTestnet.py
|
from .BinanceMain import BinanceMain
from jesse.enums import exchanges
class BinancePerpetualFuturesTestnet(BinanceMain):
def __init__(self) -> None:
from .BinanceSpot import BinanceSpot
super().__init__(
name=exchanges.BINANCE_PERPETUAL_FUTURES_TESTNET,
rest_endpoint='https://testnet.binancefuture.com/fapi/v1/klines',
backup_exchange_class=BinanceSpot
)
|
b08b638b6cc84bcabd4e73424e4b8ce6d0ffea91
|
3daa53a2190f365ee2e2acae39ca4e84919f2f50
|
/test/probe/test_empty_device_handoff.py
|
9a7f4234ca73b82fe0da15effab51ef4fd5ab1cf
|
[
"Apache-2.0"
] |
permissive
|
openstack/swift
|
4c8e4a14c1c6f7efb049f983ede28e89bd2e9140
|
f06e5369579599648cc78e4b556887bc6d978c2b
|
refs/heads/master
| 2023-08-28T15:04:33.200849
| 2023-08-24T20:35:07
| 2023-08-24T21:05:48
| 790,019
| 2,370
| 957
|
Apache-2.0
| 2023-06-22T02:45:53
| 2010-07-22T01:50:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,656
|
py
|
test_empty_device_handoff.py
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.obj.diskfile import get_data_dir
from swift.common.exceptions import ClientException
from test.probe.common import (
kill_server, ReplProbeTest, start_server, get_server_number)
from swift.common.utils import readconf
from swift.common.manager import Manager
class TestEmptyDevice(ReplProbeTest):
def _get_objects_dir(self, onode):
device = onode['device']
_, node_id = get_server_number((onode['ip'], onode['port']),
self.ipport2server)
obj_server_conf = readconf(self.configs['object-server'][node_id])
devices = obj_server_conf['app:object-server']['devices']
obj_dir = '%s/%s' % (devices, device)
return obj_dir
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
# Kill one container/obj primary server
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete the default data directory for objects on the primary server
obj_dir = '%s/%s' % (self._get_objects_dir(onode),
get_data_dir(self.policy))
shutil.rmtree(obj_dir, True)
self.assertFalse(os.path.exists(obj_dir))
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Stash the on disk data from a primary for future comparison with the
# handoff - this may not equal 'VERIFY' if for example the proxy has
# crypto enabled
direct_get_data = direct_client.direct_get_object(
onodes[1], opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
start_server((node['ip'], node['port']), self.ipport2server)
self.assertFalse(os.path.exists(obj_dir))
# We've indirectly verified the handoff node has the object, but
# let's directly verify it.
# Directly to handoff server assert we can get container/obj
another_onode = next(self.object_ring.get_more_nodes(opart))
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj,
headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
timeout = time.time() + 5
found_objs_on_cnode = []
while time.time() < timeout:
for cnode in [c for c in cnodes
if c not in found_objs_on_cnode]:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
found_objs_on_cnode.append(cnode)
if len(found_objs_on_cnode) >= len(cnodes):
break
time.sleep(0.3)
if len(found_objs_on_cnode) < len(cnodes):
missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
cnodes if cnode not in found_objs_on_cnode]
raise Exception('Container servers %r did not know about object' %
missing)
# Bring the first container/obj primary server back up
start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
else:
self.fail("Expected ClientException but didn't get it")
# Run object replication for first container/obj primary server
_, num = get_server_number(
(onode['ip'], onode.get('replication_port', onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=num)
# Run object replication for handoff node
_, another_num = get_server_number(
(another_onode['ip'],
another_onode.get('replication_port', another_onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert the handoff server no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
if __name__ == '__main__':
main()
|
0c63b22e9fe9a1d81378ee93fcbf6824eeb0719c
|
65078b8087c2040cf0188e2550ea298d20518f62
|
/src/bentoml/_internal/utils/uri.py
|
ec9032dbf62ad243ec9d86f2631225e87b69034b
|
[
"Apache-2.0"
] |
permissive
|
bentoml/BentoML
|
20ab6f8351b1c5cd116d6d60a28098246a1581b3
|
4a14f073d8a3e700aff29483b17ea053058c0c63
|
refs/heads/main
| 2023-09-05T16:03:08.909692
| 2023-09-04T18:54:33
| 2023-09-04T18:54:33
| 178,976,529
| 5,712
| 732
|
Apache-2.0
| 2023-09-14T20:07:54
| 2019-04-02T01:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
uri.py
|
import os
import pathlib
from urllib.parse import quote
from urllib.parse import unquote
from urllib.parse import urlparse
from urllib.request import url2pathname
import psutil
def path_to_uri(path: str) -> str:
"""
Convert a path to a URI.
Args:
path: Path to convert to URI.
Returns:
URI string. (quoted, absolute)
"""
path = os.path.abspath(path)
if psutil.WINDOWS:
return pathlib.PureWindowsPath(path).as_uri()
if psutil.POSIX:
return pathlib.PurePosixPath(path).as_uri()
raise ValueError("Unsupported OS")
def uri_to_path(uri: str) -> str:
"""
Convert a file URI to a path.
Args:
uri: URI to convert to path.
Returns:
Path string. (unquoted)
"""
parsed = urlparse(uri)
if parsed.scheme not in ("file", "filesystem", "unix"):
raise ValueError("Unsupported URI scheme")
host = "{0}{0}{mnt}{0}".format(os.path.sep, mnt=parsed.netloc)
return os.path.normpath(os.path.join(host, url2pathname(unquote(parsed.path))))
def encode_path_for_uri(path: str) -> str:
"""Percent-encode non-URL characters in a path."""
return quote(path.replace(os.sep, "/"))
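
# A small usage sketch (not part of the module): path_to_uri and uri_to_path are intended
# to round-trip on the current platform; the sample path below is illustrative only.
if __name__ == "__main__":
sample = os.path.join(os.sep, "tmp", "bento store", "model.bin")
uri = path_to_uri(sample) # e.g. file:///tmp/bento%20store/model.bin on POSIX
assert uri_to_path(uri) == os.path.abspath(sample)
print(uri, "->", uri_to_path(uri))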
|
8302d9b5756a01ca43debb66849db4d81921fdc9
|
3b6e0306b7a14de87ec40bec5e44039a3c7d55d8
|
/dts_test_project/customers/models.py
|
b6f1fe10d4041aad9bca6b9e2e147d8873925234
|
[
"MIT"
] |
permissive
|
bernardopires/django-tenant-schemas
|
5a4810714c6d4f4472c5088cc63076e69bc6da5b
|
a77655c2170b9e6d50003cd5806cc87589dd3bd8
|
refs/heads/master
| 2023-08-10T20:14:05.867506
| 2023-07-02T19:48:20
| 2023-07-02T19:48:20
| 4,761,556
| 1,234
| 473
|
MIT
| 2023-07-02T19:50:06
| 2012-06-23T10:54:34
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
models.py
|
from django.db import models
from tenant_schemas.models import TenantMixin
class Client(TenantMixin):
name = models.CharField(max_length=100)
description = models.TextField(max_length=200)
created_on = models.DateField(auto_now_add=True)
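
# Usage sketch (illustrative values, not part of the test project): TenantMixin supplies
# the schema_name and domain_url fields, and saving a tenant normally provisions its schema.
# tenant = Client(domain_url='tenant1.example.com', schema_name='tenant1',
# name='Tenant One', description='First test tenant')
# tenant.save()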
|
f1606ca4faf5d20449becf732ed313b5e3be881e
|
45826bdfebbd1d7638ab607906ac480031d6118b
|
/lib/models/nets/segfix.py
|
d645586d9a2daeeafc5ea71a982ed1a83d5cf2d2
|
[
"MIT"
] |
permissive
|
openseg-group/openseg.pytorch
|
b75cec5c95b6ff71707d8daf7806001bab89ecb3
|
aefc75517b09068d7131a69420bc5f66cb41f0ee
|
refs/heads/master
| 2023-09-06T10:19:57.749113
| 2022-08-07T09:10:20
| 2022-08-07T09:10:20
| 166,743,301
| 1,227
| 159
|
MIT
| 2021-07-14T06:10:44
| 2019-01-21T03:34:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
segfix.py
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: RainbowSecret
## Microsoft Research
## yuyua@microsoft.com
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import pdb
import cv2
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from lib.models.backbones.backbone_selector import BackboneSelector
from lib.models.tools.module_helper import ModuleHelper
from lib.utils.helpers.offset_helper import DTOffsetConfig
from lib.models.backbones.hrnet.hrnet_backbone import BasicBlock
class SegFix_HRNet(nn.Module):
def __init__(self, configer):
super(SegFix_HRNet, self).__init__()
self.configer = configer
self.backbone = BackboneSelector(configer).get_backbone()
backbone_name = self.configer.get('network', 'backbone')
width = int(backbone_name[-2:])
if 'hrnet2x' in backbone_name:
in_channels = width * 31
else:
in_channels = width * 15
num_masks = 2
num_directions = DTOffsetConfig.num_classes
mid_channels = 256
self.dir_head = nn.Sequential(
nn.Conv2d(in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False),
ModuleHelper.BNReLU(mid_channels,
bn_type=self.configer.get(
'network', 'bn_type')),
nn.Conv2d(mid_channels,
num_directions,
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.mask_head = nn.Sequential(
nn.Conv2d(in_channels,
mid_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False),
ModuleHelper.BNReLU(mid_channels,
bn_type=self.configer.get(
'network', 'bn_type')),
nn.Conv2d(mid_channels,
num_masks,
kernel_size=1,
stride=1,
padding=0,
bias=False))
def forward(self, x_):
x = self.backbone(x_)
_, _, h, w = x[0].size()
feat1 = x[0]
for i in range(1, len(x)):
x[i] = F.interpolate(x[i],
size=(h, w),
mode='bilinear',
align_corners=True)
feats = torch.cat(x, 1)
mask_map = self.mask_head(feats)
dir_map = self.dir_head(feats)
return mask_map, dir_map
|
862e1ad9aeae517a717a5dcb168a4acf13a4da77
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/util/headers.py
|
86c8b4810cb292d6be03cbb1ee7d68143bb6929f
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 512
|
py
|
headers.py
|
import base64
def parse_basic_auth(header_value):
"""
Attempts to parse the given header value as a Base64-encoded Basic auth header.
"""
if not header_value:
return None
parts = header_value.split(" ")
if len(parts) != 2 or parts[0].lower() != "basic":
return None
try:
# b64decode returns bytes; decode to text before splitting on the ":" separator.
basic_parts = base64.b64decode(parts[1]).decode("utf-8").split(":", 1)
if len(basic_parts) != 2:
return None
return basic_parts
except (ValueError, UnicodeDecodeError):
return None
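
# Usage sketch (illustrative, not part of the module): "dXNlcjpwYXNz" is base64 for
# "user:pass", so a well-formed header decodes to its two components.
# parse_basic_auth("Basic dXNlcjpwYXNz") -> ["user", "pass"]
# parse_basic_auth("Bearer some-token") -> None
# parse_basic_auth(None) -> None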
|
7bccc6971fa462e2f8a6ee8ef5d5182a51eda016
|
b42752f5b4509c2641cf840304900073ae287464
|
/script/get_workspace_symbols.py
|
77f9de9c23e717a3835f39e29323d0a5bce827d5
|
[
"MIT"
] |
permissive
|
antoinemadec/coc-fzf
|
d854f4dcdf982d3d3cb4b5ffc3ba23edcf07fd32
|
5fae5a15497750483e21fc207aa6005f340f02f2
|
refs/heads/master
| 2023-08-25T04:31:57.828317
| 2023-01-30T18:34:01
| 2023-01-30T18:34:01
| 236,263,116
| 442
| 40
| null | 2023-01-30T18:34:03
| 2020-01-26T03:42:15
|
Vim Script
|
UTF-8
|
Python
| false
| false
| 3,742
|
py
|
get_workspace_symbols.py
|
#!/usr/bin/env python3
import argparse
import re
from urllib.parse import unquote
from pynvim import attach
# --------------------------------------------------------------
# functions
# --------------------------------------------------------------
kind_dict = {}
kind_dict[1] = 'File'
kind_dict[2] = 'Module'
kind_dict[3] = 'Namespace'
kind_dict[4] = 'Package'
kind_dict[5] = 'Class'
kind_dict[6] = 'Method'
kind_dict[7] = 'Property'
kind_dict[8] = 'Field'
kind_dict[9] = 'Constructor'
kind_dict[10] = 'Enum'
kind_dict[11] = 'Interface'
kind_dict[12] = 'Function'
kind_dict[13] = 'Variable'
kind_dict[14] = 'Constant'
kind_dict[15] = 'String'
kind_dict[16] = 'Number'
kind_dict[17] = 'Boolean'
kind_dict[18] = 'Array'
kind_dict[19] = 'Object'
kind_dict[20] = 'Key'
kind_dict[21] = 'Null'
kind_dict[22] = 'EnumMember'
kind_dict[23] = 'Struct'
kind_dict[24] = 'Event'
kind_dict[25] = 'Operator'
kind_dict[26] = 'TypeParameter'
def get_kind(val):
return kind_dict.get(val, 'Unknown')
def get_exclude_re_patterns(symbol_excludes):
"""Convert Coc glob-style exclude patterns into regular expressions."""
re_patterns = []
for pattern in symbol_excludes:
# Escape literal dots.
re_pattern = re.sub(r'\.', r'\.', pattern)
# Temporarily encode '**' so the single '*' substitution below does not touch it.
re_pattern = re.sub(r'\*\*', r'.|', re_pattern)
# A single '*' matches anything except a path separator.
re_pattern = re.sub(r'\*', r'[^/]*', re_pattern)
# Restore the encoded '**' as '.*' (matches across path separators).
re_pattern = re.sub(r'\|', r'*', re_pattern)
re_patterns.append(re_pattern)
return re_patterns
def file_is_excluded(filename, exclude_re_patterns):
for pattern in exclude_re_patterns:
if re.match(pattern, filename):
return True
return False
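
# A quick illustration of the glob-to-regex conversion above (values are examples only):
# get_exclude_re_patterns(["**/node_modules/*", "*.min.js"])
# -> ['.*/node_modules/[^/]*', '[^/]*\.min\.js']
# file_is_excluded("project/node_modules/lodash", ['.*/node_modules/[^/]*']) -> True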
# --------------------------------------------------------------
# execution
# --------------------------------------------------------------
parser = argparse.ArgumentParser(
description='connect to running Nvim to get CocAction("getWorkspaceSymbols", query)')
parser.add_argument('socket', help="returned by Nvim's v:servername")
parser.add_argument('bufnr', help="Nvim buffer where query should be done")
parser.add_argument(
'query', help="query to pass to CocAction('getWorkspaceSymbols')")
parser.add_argument('ansi_typedef', help="ansi code for highlight Typedef")
parser.add_argument('ansi_comment', help="ansi code for highlight Comment")
parser.add_argument('ansi_ignore', help="ansi code for highlight Ignore")
parser.add_argument('symbol_excludes', help="Coc config symbol excludes list")
parser.add_argument(
'--kind', nargs=1, help='only search for a specific "kind" (class, function, etc)')
args = parser.parse_args()
nvim = attach('socket', path=args.socket)
items = nvim.call('CocAction', 'getWorkspaceSymbols', args.query,
int(args.bufnr))
if items is None or len(items) == 0:
exit(0)
symbol_excludes = eval(args.symbol_excludes)
exclude_re_patterns = get_exclude_re_patterns(symbol_excludes)
ignored_colon = args.ansi_ignore.replace('STRING', ':')
for item in items:
lnum = item['location']['range']['start']['line'] + 1
col = item['location']['range']['start']['character']
filename = unquote(item['location']['uri'].replace('file://', ''))
kind = get_kind(item['kind'])
# filters
if args.kind is not None and args.kind[0].lower() != kind.lower():
continue
if file_is_excluded(filename, exclude_re_patterns):
continue
name_with_ansi = item['name']
kind_with_ansi = args.ansi_typedef.replace('STRING', '[' + kind + ']')
filename_with_ansi = args.ansi_comment.replace('STRING', filename)
lnum_col_with_ansi = args.ansi_ignore.replace('STRING',
':' + str(lnum) + ':' + str(col))
print("{0} {1}{2}{3}{4}".format(
name_with_ansi, kind_with_ansi, ignored_colon, filename_with_ansi,
lnum_col_with_ansi))
|
e6d436625488d30a76c6903f315b39d0d2c694a3
|
b097b7caa954a0447bef9a7144e15fbc1b08a96b
|
/examples/tutorials/streamreader_advanced_tutorial.py
|
7d3a4bd09eeb136d0bf39a5c265386b57dd78038
|
[
"CC-BY-NC-4.0",
"BSD-2-Clause"
] |
permissive
|
pytorch/audio
|
3fa7006404020c9ce731f27b94f0257195d2efe3
|
e057d7d144e2716588b80255f0a143662fd5c10d
|
refs/heads/main
| 2023-09-03T15:46:06.918708
| 2023-09-02T00:39:15
| 2023-09-02T00:39:15
| 90,321,822
| 2,319
| 675
|
BSD-2-Clause
| 2023-09-13T22:09:20
| 2017-05-05T00:38:05
|
Python
|
UTF-8
|
Python
| false
| false
| 13,963
|
py
|
streamreader_advanced_tutorial.py
|
"""
StreamReader Advanced Usages
============================
**Author**: `Moto Hira <moto@meta.com>`__
This tutorial is the continuation of
`StreamReader Basic Usages <./streamreader_basic_tutorial.html>`__.
This shows how to use :py:class:`~torchaudio.io.StreamReader` for
- Device inputs, such as microphone, webcam and screen recording
- Generating synthetic audio / video
- Applying preprocessing with custom filter expressions
"""
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
import IPython
import matplotlib.pyplot as plt
from torchaudio.io import StreamReader
base_url = "https://download.pytorch.org/torchaudio/tutorial-assets"
AUDIO_URL = f"{base_url}/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
VIDEO_URL = f"{base_url}/stream-api/NASAs_Most_Scientifically_Complex_Space_Observatory_Requires_Precision-MP4.mp4"
######################################################################
# Audio / Video device input
# --------------------------
#
# .. seealso::
#
# - `Accelerated Video Decoding with NVDEC <../hw_acceleration_tutorial.html>`__.
# - `Online ASR with Emformer RNN-T <./online_asr_tutorial.html>`__.
# - `Device ASR with Emformer RNN-T <./device_asr.html>`__.
#
# Given that the system has proper media devices and libavdevice is
# configured to use the devices, the streaming API can
# pull media streams from these devices.
#
# To do this, we pass additional parameters ``format`` and ``option``
# to the constructor. ``format`` specifies the device component and
# ``option`` dictionary is specific to the specified component.
#
# The exact arguments to be passed depend on the system configuration.
# Please refer to https://ffmpeg.org/ffmpeg-devices.html for the detail.
#
# The following example illustrates how one can do this on MacBook Pro.
#
# First, we need to check the available devices.
#
# .. code::
#
# $ ffmpeg -f avfoundation -list_devices true -i ""
# [AVFoundation indev @ 0x143f04e50] AVFoundation video devices:
# [AVFoundation indev @ 0x143f04e50] [0] FaceTime HD Camera
# [AVFoundation indev @ 0x143f04e50] [1] Capture screen 0
# [AVFoundation indev @ 0x143f04e50] AVFoundation audio devices:
# [AVFoundation indev @ 0x143f04e50] [0] MacBook Pro Microphone
#
# We use `FaceTime HD Camera` as video device (index 0) and
# `MacBook Pro Microphone` as audio device (index 0).
#
# If we do not pass any ``option``, the device uses its default
# configuration. The decoder might not support the configuration.
#
# .. code::
#
# >>> StreamReader(
# ... src="0:0", # The first 0 means `FaceTime HD Camera`, and
# ... # the second 0 indicates `MacBook Pro Microphone`.
# ... format="avfoundation",
# ... )
# [avfoundation @ 0x125d4fe00] Selected framerate (29.970030) is not supported by the device.
# [avfoundation @ 0x125d4fe00] Supported modes:
# [avfoundation @ 0x125d4fe00] 1280x720@[1.000000 30.000000]fps
# [avfoundation @ 0x125d4fe00] 640x480@[1.000000 30.000000]fps
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ...
# RuntimeError: Failed to open the input: 0:0
#
# By providing ``option``, we can change the format that the device
# streams to a format supported by decoder.
#
# .. code::
#
# >>> streamer = StreamReader(
# ... src="0:0",
# ... format="avfoundation",
# ... option={"framerate": "30", "pixel_format": "bgr0"},
# ... )
# >>> for i in range(streamer.num_src_streams):
# ... print(streamer.get_src_stream_info(i))
# SourceVideoStream(media_type='video', codec='rawvideo', codec_long_name='raw video', format='bgr0', bit_rate=0, width=640, height=480, frame_rate=30.0)
# SourceAudioStream(media_type='audio', codec='pcm_f32le', codec_long_name='PCM 32-bit floating point little-endian', format='flt', bit_rate=3072000, sample_rate=48000.0, num_channels=2)
#
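######################################################################
#
# Once a device streamer is constructed, chunks are pulled with the same
# output-stream API used elsewhere in this tutorial. The following is a
# sketch only (it assumes the ``avfoundation`` configuration above and a
# working microphone/camera), so it is shown as a code block rather than
# executed here.
#
# .. code::
#
#    >>> streamer.add_basic_audio_stream(frames_per_chunk=4800, sample_rate=48000)
#    >>> streamer.add_basic_video_stream(frames_per_chunk=30, frame_rate=30, format="rgb24")
#    >>> for i, (audio_chunk, video_chunk) in enumerate(streamer.stream()):
#    ...     print(audio_chunk.shape, video_chunk.shape)
#    ...     if i == 2:  # stop after a few chunks
#    ...         break
#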
######################################################################
#
# .. _lavfi:
#
# Synthetic source streams
# ------------------------
#
# As a part of device integration, ffmpeg provides a "virtual device"
# interface. This interface provides synthetic audio / video data
# generation using libavfilter.
#
# To use this, we set ``format=lavfi`` and provide a filter description
# to ``src``.
#
# The detail of filter description can be found at
# https://ffmpeg.org/ffmpeg-filters.html
#
######################################################################
# Audio Examples
# ~~~~~~~~~~~~~~
#
######################################################################
# Sine wave
# ^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#sine
#
# .. code::
#
# StreamReader(src="sine=sample_rate=8000:frequency=360", format="lavfi")
#
# .. raw:: html
#
# <audio controls>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/sine.wav">
# </audio>
# <img
# src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/sine.png"
# class="sphx-glr-single-img" style="width:80%">
#
######################################################################
# Signal with arbitral expression
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# https://ffmpeg.org/ffmpeg-filters.html#aevalsrc
#
# .. code::
#
# # 5 Hz binaural beats on a 360 Hz carrier
# StreamReader(
# src=(
# 'aevalsrc='
# 'sample_rate=8000:'
# 'exprs=0.1*sin(2*PI*(360-5/2)*t)|0.1*sin(2*PI*(360+5/2)*t)'
# ),
# format='lavfi',
# )
#
# .. raw:: html
#
# <audio controls>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/aevalsrc.wav">
# </audio>
# <img
# src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/aevalsrc.png"
# class="sphx-glr-single-img" style="width:80%">
#
######################################################################
# Noise
# ^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#anoisesrc
#
# .. code::
#
# StreamReader(src="anoisesrc=color=pink:sample_rate=8000:amplitude=0.5", format="lavfi")
#
# .. raw:: html
#
# <audio controls>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/anoisesrc.wav">
# </audio>
# <img
# src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/anoisesrc.png"
# class="sphx-glr-single-img" style="width:80%">
#
######################################################################
# Video Examples
# ~~~~~~~~~~~~~~
#
######################################################################
# Cellular automaton
# ^^^^^^^^^^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#cellauto
#
# .. code::
#
# StreamReader(src=f"cellauto", format="lavfi")
#
# .. raw:: html
#
# <video controls autoplay loop muted>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/cellauto.mp4">
# </video>
#
######################################################################
# Mandelbrot
# ^^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#mandelbrot
#
# .. code::
#
# StreamReader(src=f"mandelbrot", format="lavfi")
#
# .. raw:: html
#
# <video controls autoplay loop muted>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/mandelbrot.mp4">
# </video>
#
######################################################################
# MPlayer Test patterns
# ^^^^^^^^^^^^^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#mptestsrc
#
# .. code::
#
# StreamReader(src=f"mptestsrc", format="lavfi")
#
# .. raw:: html
#
# <video controls autoplay loop muted width=192 height=192>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/mptestsrc.mp4">
# </video>
#
######################################################################
# John Conway's life game
# ^^^^^^^^^^^^^^^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#life
#
# .. code::
#
# StreamReader(src=f"life", format="lavfi")
#
# .. raw:: html
#
# <video controls autoplay loop muted>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/life.mp4">
# </video>
#
######################################################################
# Sierpinski carpet/triangle fractal
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# https://ffmpeg.org/ffmpeg-filters.html#sierpinski
#
# .. code::
#
# StreamReader(src=f"sierpinski", format="lavfi")
#
# .. raw:: html
#
# <video controls autoplay loop muted>
# <source src="https://download.pytorch.org/torchaudio/tutorial-assets/stream-api/sierpinski.mp4">
# </video>
#
######################################################################
# Custom filters
# --------------
#
# When defining an output stream, you can use
# :py:meth:`~torchaudio.io.StreamReader.add_audio_stream` and
# :py:meth:`~torchaudio.io.StreamReader.add_video_stream` methods.
#
# These methods take ``filter_desc`` argument, which is a string
# formatted according to ffmpeg's
# `filter expression <https://ffmpeg.org/ffmpeg-filters.html>`_.
#
# The difference between ``add_basic_(audio|video)_stream`` and
# ``add_(audio|video)_stream`` is that ``add_basic_(audio|video)_stream``
# constructs the filter expression and passes it to the same underlying
# implementation. Everything ``add_basic_(audio|video)_stream`` can be
# achieved with ``add_(audio|video)_stream``.
#
# .. note::
#
# - When applying custom filters, the client code must convert
# the audio/video stream to one of the formats that torchaudio
# can convert to tensor format.
# This can be achieved, for example, by applying
# ``format=pix_fmts=rgb24`` to video stream and
# ``aformat=sample_fmts=fltp`` to audio stream.
# - Each output stream has separate filter graph. Therefore, it is
# not possible to use different input/output streams for a
# filter expression. However, it is possible to split one input
# stream into multiple of them, and merge them later.
#
######################################################################
# Audio Examples
# ~~~~~~~~~~~~~~
#
#
# fmt: off
descs = [
# No filtering
"anull",
# Apply a highpass filter then a lowpass filter
"highpass=f=200,lowpass=f=1000",
# Manipulate spectrogram
(
"afftfilt="
"real='hypot(re,im)*sin(0)':"
"imag='hypot(re,im)*cos(0)':"
"win_size=512:"
"overlap=0.75"
),
# Manipulate spectrogram
(
"afftfilt="
"real='hypot(re,im)*cos((random(0)*2-1)*2*3.14)':"
"imag='hypot(re,im)*sin((random(1)*2-1)*2*3.14)':"
"win_size=128:"
"overlap=0.8"
),
]
# fmt: on
######################################################################
#
sample_rate = 8000
streamer = StreamReader(AUDIO_URL)
for desc in descs:
streamer.add_audio_stream(
frames_per_chunk=40000,
filter_desc=f"aresample={sample_rate},{desc},aformat=sample_fmts=fltp",
)
chunks = next(streamer.stream())
def _display(i):
print("filter_desc:", streamer.get_out_stream_info(i).filter_description)
fig, axs = plt.subplots(2, 1)
waveform = chunks[i][:, 0]
axs[0].plot(waveform)
axs[0].grid(True)
axs[0].set_ylim([-1, 1])
plt.setp(axs[0].get_xticklabels(), visible=False)
axs[1].specgram(waveform, Fs=sample_rate)
fig.tight_layout()
return IPython.display.Audio(chunks[i].T, rate=sample_rate)
######################################################################
# Original
# ^^^^^^^^
#
_display(0)
######################################################################
# Highpass / lowpass filter
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
_display(1)
######################################################################
# FFT filter - Robot 🤖
# ^^^^^^^^^^^^^^^^^^^^^
#
_display(2)
######################################################################
# FFT filter - Whisper
# ^^^^^^^^^^^^^^^^^^^^
#
_display(3)
######################################################################
# Video Examples
# ~~~~~~~~~~~~~~
#
# fmt: off
descs = [
# No effect
"null",
# Split the input stream and apply horizontal flip to the right half.
(
"split [main][tmp];"
"[tmp] crop=iw/2:ih:0:0, hflip [flip];"
"[main][flip] overlay=W/2:0"
),
# Edge detection
"edgedetect=mode=canny",
    # Rotate the image by a random angle and fill the background with brown
"rotate=angle=-random(1)*PI:fillcolor=brown",
# Manipulate pixel values based on the coordinate
"geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'"
]
# fmt: on
######################################################################
#
streamer = StreamReader(VIDEO_URL)
for desc in descs:
streamer.add_video_stream(
frames_per_chunk=30,
filter_desc=f"fps=10,{desc},format=pix_fmts=rgb24",
)
streamer.seek(12)
chunks = next(streamer.stream())
def _display(i):
print("filter_desc:", streamer.get_out_stream_info(i).filter_description)
_, axs = plt.subplots(1, 3, figsize=(8, 1.9))
chunk = chunks[i]
for j in range(3):
axs[j].imshow(chunk[10 * j + 1].permute(1, 2, 0))
axs[j].set_axis_off()
plt.tight_layout()
######################################################################
# Original
# ^^^^^^^^
_display(0)
######################################################################
# Mirror
# ^^^^^^
_display(1)
######################################################################
# Edge detection
# ^^^^^^^^^^^^^^^
_display(2)
######################################################################
# Random rotation
# ^^^^^^^^^^^^^^^
_display(3)
######################################################################
# Pixel manipulation
# ^^^^^^^^^^^^^^^^^^
_display(4)
######################################################################
#
# Tag: :obj:`torchaudio.io`
|
bbb80891269a2e7573657a9a0dae21e9fa50eb51
|
0349e502733a4c25f020fbcad4715f598d686799
|
/tutorials/python_invocation_example.py
|
091a137ff491d25ca91ce85395bb7ad48b74b13a
|
[
"Apache-2.0"
] |
permissive
|
IntelLabs/coach
|
679592e9887f5788229fef9d77a1a7975e959bc4
|
2c60cb5acd8cd3c9c381a5066c208e69fc273c7b
|
refs/heads/master
| 2023-09-05T17:56:19.435416
| 2022-12-11T17:54:06
| 2022-12-11T17:54:06
| 105,468,219
| 497
| 102
|
Apache-2.0
| 2021-12-27T09:52:30
| 2017-10-01T19:27:43
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
python_invocation_example.py
|
from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import SimpleSchedule
graph_manager = BasicRLGraphManager(
agent_params=ClippedPPOAgentParameters(),
env_params=GymVectorEnvironment(level='CartPole-v0'),
schedule_params=SimpleSchedule()
)
graph_manager.heatup(EnvironmentSteps(100))
graph_manager.train_and_act(EnvironmentSteps(100))
|
4437b3f690e2d92e7cf70f98abfe16b3eef3cd30
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/stripe/stripe/api_resources/alipay_account.pyi
|
f18475a6774f1d327ae5facda9506b71fee763b2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 700
|
pyi
|
alipay_account.pyi
|
from _typeshed import Incomplete
from typing import NoReturn
from stripe.api_resources.abstract import (
DeletableAPIResource as DeletableAPIResource,
UpdateableAPIResource as UpdateableAPIResource,
)
from stripe.api_resources.customer import Customer as Customer
class AlipayAccount(DeletableAPIResource, UpdateableAPIResource):
OBJECT_NAME: str
def instance_url(self): ...
@classmethod
def modify(cls, customer, id, **params): ...
@classmethod
def retrieve(
cls,
id,
api_key: Incomplete | None = ...,
stripe_version: Incomplete | None = ...,
stripe_account: Incomplete | None = ...,
**params,
) -> NoReturn: ...
|
9b5b4c794237efedfe351eb363874afb558f1e8a
|
d947e08de661346384f81e8bdb8aca8f201376f1
|
/src/dist.py
|
7ebddf5061ff85632f7c6e48db13d8587cecc0b8
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/kotlin-web-site
|
48813e763124dbade34234f6215906239677f88a
|
cd33fb0ae0c9caef79c1eac9b42d70485866f1ba
|
refs/heads/master
| 2023-09-01T04:43:38.247946
| 2023-08-30T13:46:30
| 2023-08-30T13:53:51
| 14,397,208
| 1,509
| 4,069
|
Apache-2.0
| 2023-09-14T09:27:55
| 2013-11-14T14:26:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
dist.py
|
from bs4 import BeautifulSoup
from os import path, walk
dist_path = path.join(path.dirname(__file__), "../", "dist")
def get_dist_page_content(url):
path_file = dist_path + url
if url.endswith('/'):
path_file += 'index.html'
if path.exists(path_file):
with open(path_file, 'r', encoding="UTF-8") as file:
return file.read()
raise Exception('Bad response during indexing')
def get_dist_page_xml(url):
html_content = get_dist_page_content(url)
return BeautifulSoup(html_content, "html.parser")
def get_dist_page_type(url):
page_type = None
if url.endswith('/') or url.endswith('.html'):
page_type = 'Page'
if url.startswith('community'):
page_type = 'Page_Community'
if url.startswith('docs/reference'):
page_type = 'Page_Reference'
if url.startswith('docs/tutorials'):
page_type = 'Page_Tutorial'
if url.endswith('404.html'):
page_type = 'Page_NotFound'
parsed = get_dist_page_xml(url)
if url.startswith("/api/latest/"):
page_type = "Page_API_stdlib" if "jvm/stdlib" in url else "Page_API_test"
if url.startswith("/spec/"):
page_type = "Page_Spec"
if parsed.select_one("body[data-article-props]"):
page_type = 'Page_Documentation'
if parsed.find("meta", {"http-equiv": "refresh"}):
page_type = 'Redirect'
if url.endswith('pdf'):
page_type = 'File_Pdf'
if url.endswith('package-list') or url.endswith('index.yml'):
page_type = 'File_Text'
return page_type
def get_dist_pages():
paths = []
if path.isdir(dist_path):
for root, dirnames, filenames in walk(dist_path):
for filename in filenames:
prefix_path = root[len(dist_path):]
if not prefix_path: prefix_path = "/"
url = path.join(prefix_path, filename)
if url.endswith('index.html'): url = url[:-10]
paths.append((url, get_dist_page_type(url)))
return paths
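
# Usage sketch (illustrative, not part of the build tooling): iterate over the generated
# site and count page types. Assumes the dist/ directory has already been built.
if __name__ == '__main__':
from collections import Counter
counts = Counter(page_type for _, page_type in get_dist_pages())
for page_type, count in counts.most_common():
print(page_type, count)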
|
0aa56c2a2472fd20b9af07a0909b594b2baaed74
|
a2882062b8ce4252afeb92ae5bade1299bc97501
|
/synthesize.py
|
f03fc08ab5ecccf4d5ca1357bde330f75ddd5db2
|
[
"MIT"
] |
permissive
|
keonlee9420/Comprehensive-E2E-TTS
|
526425b5abc7b623a24e0d0b60af8f28e6753acc
|
0b5d0a597f95f3fd4b3a80ae8cf5d07013cadb83
|
refs/heads/main
| 2023-05-24T01:14:03.042390
| 2022-06-06T12:30:10
| 2022-06-06T12:30:10
| 475,779,002
| 127
| 21
| null | 2022-06-06T12:30:11
| 2022-03-30T08:02:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,692
|
py
|
synthesize.py
|
import re
import os
import json
import argparse
from string import punctuation
import torch
import yaml
import numpy as np
from torch.utils.data import DataLoader
from g2p_en import G2p
from tqdm import tqdm
import audio as Audio
from utils.model import get_model
from utils.tools import get_configs_of, to_device, synth_samples
from dataset import Dataset, TextDataset
from text import text_to_sequence
def preprocess_english(text, preprocess_config):
text = text.rstrip(punctuation)
g2p = G2p()
phones = []
words = re.split(r"([,;.\-\?\!\s+])", text)
for w in words:
phones += list(filter(lambda p: p != " ", g2p(w)))
phones = "{" + "}{".join(phones) + "}"
phones = re.sub(r"\{[^\w\s]?\}", "{sp}", phones)
phones = phones.replace("}{", " ")
print("Raw Text Sequence: {}".format(text))
print("Phoneme Sequence: {}".format(phones))
sequence = np.array(
text_to_sequence(
phones, preprocess_config["preprocessing"]["text"]["text_cleaners"]
)
)
return np.array(sequence)
def synthesize(device, model, args, configs, batchs, control_values, STFT):
preprocess_config, model_config, train_config = configs
pitch_control, energy_control, duration_control = control_values
def synthesize_(batch):
batch = to_device(batch, device)
with torch.no_grad():
# Forward
output = model(
*(batch[2:-1]),
spker_embeds=batch[-1],
p_control=pitch_control,
e_control=energy_control,
d_control=duration_control,
cut=False,
)
synth_samples(
batch,
output,
model_config,
preprocess_config,
train_config["path"]["result_path"],
args,
STFT,
)
if args.teacher_forced:
for batchs_ in tqdm(batchs):
for batch in batchs_:
batch = list(batch)
# batch[9] = None # set mel None
# batch[10] = None # set mel_len None
# batch[11] = None # set max_mel_len None
# batch[16] = None # set attn_prior None
synthesize_(batch)
else:
for batch in tqdm(batchs):
synthesize_(batch)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, required=True)
parser.add_argument("--path_tag", type=str, default="")
parser.add_argument("--teacher_forced", action="store_true")
parser.add_argument(
"--mode",
type=str,
choices=["batch", "single"],
required=True,
help="Synthesize a whole dataset or a single sentence",
)
parser.add_argument(
"--source",
type=str,
default=None,
help="path to a source file with format like train.txt and val.txt, for batch mode only",
)
parser.add_argument(
"--text",
type=str,
default=None,
help="raw text to synthesize, for single-sentence mode only",
)
parser.add_argument(
"--speaker_id",
type=str,
default="p225",
help="speaker ID for multi-speaker synthesis, for single-sentence mode only",
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="name of dataset",
)
parser.add_argument(
"--pitch_control",
type=float,
default=1.0,
help="control the pitch of the whole utterance, larger value for higher pitch",
)
parser.add_argument(
"--energy_control",
type=float,
default=1.0,
help="control the energy of the whole utterance, larger value for larger volume",
)
parser.add_argument(
"--duration_control",
type=float,
default=1.0,
help="control the speed of the whole utterance, larger value for slower speaking rate",
)
args = parser.parse_args()
# Check source texts
if args.mode == "batch":
assert args.text is None
if args.teacher_forced:
assert args.source is None
else:
assert args.source is not None
if args.mode == "single":
assert args.source is None and args.text is not None and not args.teacher_forced
# Read Config
preprocess_config, model_config, train_config = get_configs_of(args.dataset)
configs = (preprocess_config, model_config, train_config)
if preprocess_config["preprocessing"]["pitch"]["pitch_type"] == "cwt":
from utils.pitch_tools import get_lf0_cwt
preprocess_config["preprocessing"]["pitch"]["cwt_scales"] = get_lf0_cwt(np.ones(10))[1]
path_tag = "_{}".format(args.path_tag) if args.path_tag != "" else args.path_tag
train_config["path"]["ckpt_path"] = train_config["path"]["ckpt_path"]+"{}".format(path_tag)
train_config["path"]["log_path"] = train_config["path"]["log_path"]+"{}".format(path_tag)
train_config["path"]["result_path"] = train_config["path"]["result_path"]+"{}".format(path_tag)
os.makedirs(
os.path.join(train_config["path"]["result_path"], str(args.restore_step)), exist_ok=True)
# Set Device
torch.manual_seed(train_config["seed"])
if torch.cuda.is_available():
torch.cuda.manual_seed(train_config["seed"])
device = torch.device('cuda')
else:
device = torch.device('cpu')
print("Device of E2ETTS:", device)
# Get model
model = get_model(args, configs, device, train=False)
# Logging
STFT = Audio.stft.TorchSTFT(preprocess_config)
# Preprocess texts
if args.mode == "batch":
        # Get dataset
if args.teacher_forced:
dataset = Dataset(
"val.txt", preprocess_config, model_config, train_config, sort=False, drop_last=False
)
else:
dataset = TextDataset(args.source, preprocess_config, model_config)
batchs = DataLoader(
dataset,
batch_size=8,
collate_fn=dataset.collate_fn,
)
if args.mode == "single":
ids = raw_texts = [args.text[:100]]
# Speaker Info
load_spker_embed = model_config["multi_speaker"] \
and preprocess_config["preprocessing"]["speaker_embedder"] != 'none'
with open(os.path.join(preprocess_config["path"]["preprocessed_path"], "speakers.json")) as f:
speaker_map = json.load(f)
speakers = np.array([speaker_map[args.speaker_id]]) if model_config["multi_speaker"] else np.array([0]) # single speaker is allocated 0
spker_embed = np.load(os.path.join(
preprocess_config["path"]["preprocessed_path"],
"spker_embed",
"{}-spker_embed.npy".format(args.speaker_id),
)) if load_spker_embed else None
if preprocess_config["preprocessing"]["text"]["language"] == "en":
texts = np.array([preprocess_english(args.text, preprocess_config)])
else:
raise NotImplementedError
text_lens = np.array([len(texts[0])])
batchs = [(ids, raw_texts, speakers, texts, text_lens, max(text_lens), spker_embed)]
control_values = args.pitch_control, args.energy_control, args.duration_control
synthesize(device, model, args, configs, batchs, control_values, STFT)
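
# Example invocations (illustrative only; the dataset name and checkpoint step are placeholders):
#
# Single-sentence synthesis:
# python3 synthesize.py --restore_step 900000 --mode single --dataset <DATASET> \
#     --text "Hello world." --speaker_id p225
#
# Batch synthesis with teacher forcing on the validation split:
# python3 synthesize.py --restore_step 900000 --mode batch --dataset <DATASET> --teacher_forced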
|
d7e7afae5cc91f681801d69f56095028a4bd467c
|
9784a90cac667e8e0aaba0ca599b4255b215ec67
|
/tensorflow2/tf2cv/models/nvpattexp.py
|
1b92025774308d8f998b98ecbbd3ed97f99c29fc
|
[
"MIT"
] |
permissive
|
osmr/imgclsmob
|
d2f48f01ca541b20119871393eca383001a96019
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
refs/heads/master
| 2022-07-09T14:24:37.591824
| 2021-12-14T10:15:31
| 2021-12-14T10:15:31
| 140,285,687
| 3,017
| 624
|
MIT
| 2022-07-04T15:18:37
| 2018-07-09T12:57:46
|
Python
|
UTF-8
|
Python
| false
| false
| 10,488
|
py
|
nvpattexp.py
|
"""
Neural Voice Puppetry Audio-to-Expression net for speech-driven facial animation, implemented in TensorFlow.
Original paper: 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.
"""
__all__ = ['NvpAttExp', 'nvpattexp116bazel76']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import DenseBlock, ConvBlock, ConvBlock1d, SelectableDense, SimpleSequential, is_channels_first
class NvpAttExpEncoder(nn.Layer):
"""
Neural Voice Puppetry Audio-to-Expression encoder.
Parameters:
----------
audio_features : int
Number of audio features (characters/sounds).
audio_window_size : int
Size of audio window (for time related audio features).
    seq_len : int
Size of feature window.
encoder_features : int
Number of encoder features.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
audio_features,
audio_window_size,
seq_len,
encoder_features,
data_format="channels_last",
**kwargs):
super(NvpAttExpEncoder, self).__init__(**kwargs)
self.audio_features = audio_features
self.audio_window_size = audio_window_size
self.seq_len = seq_len
self.data_format = data_format
conv_channels = (32, 32, 64, 64)
conv_slopes = (0.02, 0.02, 0.2, 0.2)
fc_channels = (128, 64, encoder_features)
fc_slopes = (0.02, 0.02, None)
att_conv_channels = (16, 8, 4, 2, 1)
att_conv_slopes = 0.02
in_channels = audio_features
self.conv_branch = SimpleSequential(name="conv_branch")
for i, (out_channels, slope) in enumerate(zip(conv_channels, conv_slopes)):
self.conv_branch.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=(2, 1),
padding=(1, 0),
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=slope),
data_format=data_format,
name="conv{}".format(i + 1)))
in_channels = out_channels
self.fc_branch = SimpleSequential(name="fc_branch")
for i, (out_channels, slope) in enumerate(zip(fc_channels, fc_slopes)):
activation = nn.LeakyReLU(alpha=slope) if slope is not None else "tanh"
self.fc_branch.add(DenseBlock(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
use_bn=False,
activation=activation,
data_format=data_format,
name="fc{}".format(i + 1)))
in_channels = out_channels
self.att_conv_branch = SimpleSequential(name="att_conv_branch")
        for i, out_channels in enumerate(att_conv_channels):
self.att_conv_branch.add(ConvBlock1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=att_conv_slopes),
data_format=data_format,
name="att_conv{}".format(i + 1)))
in_channels = out_channels
self.att_fc = DenseBlock(
in_channels=seq_len,
out_channels=seq_len,
use_bias=True,
use_bn=False,
activation=nn.Softmax(axis=1),
data_format=data_format,
name="att_fc")
def call(self, x, training=None):
batch = x.shape[0]
batch_seq_len = batch * self.seq_len
if is_channels_first(self.data_format):
x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features))
x = tf.transpose(x, perm=(0, 3, 2, 1))
x = self.conv_branch(x)
x = tf.squeeze(x, axis=-1)
x = tf.reshape(x, shape=(batch_seq_len, 1, -1))
x = self.fc_branch(x)
x = tf.reshape(x, shape=(batch, self.seq_len, -1))
x = tf.transpose(x, perm=(0, 2, 1))
y = x[:, :, (self.seq_len // 2)]
w = self.att_conv_branch(x)
w = tf.squeeze(w, axis=1)
w = self.att_fc(w)
w = tf.expand_dims(w, axis=-1)
else:
x = tf.transpose(x, perm=(0, 3, 1, 2))
x = tf.reshape(x, shape=(-1, 1, self.audio_window_size, self.audio_features))
x = tf.transpose(x, perm=(0, 2, 3, 1))
x = tf.transpose(x, perm=(0, 1, 3, 2))
x = self.conv_branch(x)
x = tf.squeeze(x, axis=1)
x = self.fc_branch(x)
x = tf.reshape(x, shape=(batch, self.seq_len, -1))
y = x[:, (self.seq_len // 2), :]
w = self.att_conv_branch(x)
w = tf.squeeze(w, axis=-1)
w = self.att_fc(w)
w = tf.expand_dims(w, axis=-1)
x = tf.transpose(x, perm=(0, 2, 1))
x = tf.keras.backend.batch_dot(x, w)
x = tf.squeeze(x, axis=-1)
return x, y
class NvpAttExp(tf.keras.Model):
"""
Neural Voice Puppetry Audio-to-Expression model from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,'
https://arxiv.org/abs/1912.05566.
Parameters:
----------
audio_features : int, default 29
Number of audio features (characters/sounds).
audio_window_size : int, default 16
Size of audio window (for time related audio features).
seq_len : int, default 8
Size of feature window.
base_persons : int, default 116
Number of base persons (identities).
blendshapes : int, default 76
Number of 3D model blendshapes.
encoder_features : int, default 32
Number of encoder features.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
audio_features=29,
audio_window_size=16,
seq_len=8,
base_persons=116,
blendshapes=76,
encoder_features=32,
data_format="channels_last",
**kwargs):
super(NvpAttExp, self).__init__(**kwargs)
self.base_persons = base_persons
self.data_format = data_format
self.encoder = NvpAttExpEncoder(
audio_features=audio_features,
audio_window_size=audio_window_size,
seq_len=seq_len,
encoder_features=encoder_features,
data_format=data_format,
name="encoder")
self.decoder = SelectableDense(
in_channels=encoder_features,
out_channels=blendshapes,
use_bias=False,
num_options=base_persons,
name="decoder")
def call(self, x, pid, training=None):
x, y = self.encoder(x, training=training)
x = self.decoder(x, pid)
y = self.decoder(y, pid)
return x, y
def get_nvpattexp(base_persons,
blendshapes,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create Neural Voice Puppetry Audio-to-Expression model with specific parameters.
Parameters:
----------
base_persons : int
Number of base persons (subjects).
blendshapes : int
Number of 3D model blendshapes.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = NvpAttExp(
base_persons=base_persons,
blendshapes=blendshapes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def nvpattexp116bazel76(**kwargs):
"""
Neural Voice Puppetry Audio-to-Expression model for 116 base persons and Bazel topology with 76 blendshapes from
'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_nvpattexp(base_persons=116, blendshapes=76, model_name="nvpattexp116bazel76", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
# data_format = "channels_first"
data_format = "channels_last"
pretrained = False
models = [
nvpattexp116bazel76,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
seq_len = 8
audio_window_size = 16
audio_features = 29
blendshapes = 76
x = tf.random.normal((batch, seq_len, audio_window_size, audio_features) if is_channels_first(data_format) else
(batch, audio_window_size, audio_features, seq_len))
pid = tf.fill(dims=(batch,), value=3)
y1, y2 = net(x, pid)
assert (y1.shape == y2.shape == (batch, blendshapes))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != nvpattexp116bazel76 or weight_count == 327397)
if __name__ == "__main__":
_test()
|
46dc69599f4261e9b2ef6df2f322f7c15be8ffbe
|
233676e340835a58e8041bdc82c313e599af415c
|
/aslam_offline_calibration/kalibr/python/kalibr_imu_camera_calibration/__init__.py
|
1c4c3ecefc43bee6bbf3042b66e4815fd583dc84
|
[
"BSD-3-Clause"
] |
permissive
|
ethz-asl/kalibr
|
9213daa87ed191ce1e05fba9f7424204c2d9734c
|
94bb8437a72a0d97a491097a7085bf3db4f93bba
|
refs/heads/master
| 2023-08-29T17:04:47.774244
| 2023-08-14T02:08:46
| 2023-08-14T02:08:46
| 20,293,077
| 3,744
| 1,341
|
NOASSERTION
| 2023-09-10T02:18:47
| 2014-05-29T12:31:48
|
C++
|
UTF-8
|
Python
| false
| false
| 124
|
py
|
__init__.py
|
from .IccCalibrator import *
from . import IccUtil as util
from . import IccPlots as plots
from . import IccSensors as sens
|
daa0113695a073ff357c0d404a4ad744a542691d
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/networkx/utils/tests/test_decorators.py
|
f8c5e2440a82f12112e8d3b91b4fe64f539c8f0d
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 8,712
|
py
|
test_decorators.py
|
import tempfile
import os
import random
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.utils.decorators import open_file, not_implemented_for
from networkx.utils.decorators import nodes_or_number, preserve_random_state, \
py_random_state, np_random_state, random_state
from networkx.utils.misc import PythonRandomInterface
def test_not_implemented_decorator():
@not_implemented_for('directed')
def test1(G):
pass
test1(nx.Graph())
@raises(KeyError)
def test_not_implemented_decorator_key():
@not_implemented_for('foo')
def test1(G):
pass
test1(nx.Graph())
@raises(nx.NetworkXNotImplemented)
def test_not_implemented_decorator_raise():
@not_implemented_for('graph')
def test1(G):
pass
test1(nx.Graph())
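# The tests below exercise the open_file decorator: when the decorated argument is a
# filename string (or a pathlib.Path), the file is opened and closed automatically;
# an already-open file object is passed through and left open, which is what
# test_writer_arg1_fobj checks with assert_false(self.fobj.closed).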
class TestOpenFileDecorator(object):
def setUp(self):
self.text = ['Blah... ', 'BLAH ', 'BLAH!!!!']
self.fobj = tempfile.NamedTemporaryFile('wb+', delete=False)
self.name = self.fobj.name
def write(self, path):
for text in self.text:
path.write(text.encode('ascii'))
@open_file(1, 'r')
def read(self, path):
return path.readlines()[0]
@staticmethod
@open_file(0, 'wb')
def writer_arg0(path):
path.write('demo'.encode('ascii'))
@open_file(1, 'wb+')
def writer_arg1(self, path):
self.write(path)
@open_file(2, 'wb')
def writer_arg2default(self, x, path=None):
if path is None:
with tempfile.NamedTemporaryFile('wb+') as fh:
self.write(fh)
else:
self.write(path)
@open_file(4, 'wb')
def writer_arg4default(self, x, y, other='hello', path=None, **kwargs):
if path is None:
with tempfile.NamedTemporaryFile('wb+') as fh:
self.write(fh)
else:
self.write(path)
@open_file('path', 'wb')
def writer_kwarg(self, **kwargs):
path = kwargs.get('path', None)
if path is None:
with tempfile.NamedTemporaryFile('wb+') as fh:
self.write(fh)
else:
self.write(path)
def test_writer_arg0_str(self):
self.writer_arg0(self.name)
def test_writer_arg0_fobj(self):
self.writer_arg0(self.fobj)
def test_writer_arg0_pathlib(self):
try:
import pathlib
self.writer_arg0(pathlib.Path(self.name))
except ImportError:
return
def test_writer_arg1_str(self):
self.writer_arg1(self.name)
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_arg1_fobj(self):
self.writer_arg1(self.fobj)
assert_false(self.fobj.closed)
self.fobj.close()
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_arg2default_str(self):
self.writer_arg2default(0, path=None)
self.writer_arg2default(0, path=self.name)
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_arg2default_fobj(self):
self.writer_arg2default(0, path=self.fobj)
assert_false(self.fobj.closed)
self.fobj.close()
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_arg2default_fobj_path_none(self):
self.writer_arg2default(0, path=None)
def test_writer_arg4default_fobj(self):
self.writer_arg4default(0, 1, dog='dog', other='other')
self.writer_arg4default(0, 1, dog='dog', other='other', path=self.name)
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_kwarg_str(self):
self.writer_kwarg(path=self.name)
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_kwarg_fobj(self):
self.writer_kwarg(path=self.fobj)
self.fobj.close()
assert_equal(self.read(self.name), ''.join(self.text))
def test_writer_kwarg_path_none(self):
self.writer_kwarg(path=None)
def tearDown(self):
self.fobj.close()
os.unlink(self.name)
@preserve_random_state
def test_preserve_random_state():
try:
import numpy.random
r = numpy.random.random()
except ImportError:
return
assert(abs(r - 0.61879477158568) < 1e-16)
class TestRandomState(object):
@classmethod
def setUp(cls):
global np
try:
import numpy as np
except ImportError:
raise SkipTest('NumPy not available.')
@random_state(1)
def instantiate_random_state(self, random_state):
assert_true(isinstance(random_state, np.random.RandomState))
return random_state.random_sample()
@np_random_state(1)
def instantiate_np_random_state(self, random_state):
assert_true(isinstance(random_state, np.random.RandomState))
return random_state.random_sample()
@py_random_state(1)
def instantiate_py_random_state(self, random_state):
assert_true(isinstance(random_state, random.Random) or
isinstance(random_state, PythonRandomInterface))
return random_state.random()
def test_random_state_None(self):
np.random.seed(42)
rv = np.random.random_sample()
np.random.seed(42)
assert_equal(rv, self.instantiate_random_state(None))
np.random.seed(42)
assert_equal(rv, self.instantiate_np_random_state(None))
random.seed(42)
rv = random.random()
random.seed(42)
assert_equal(rv, self.instantiate_py_random_state(None))
def test_random_state_np_random(self):
np.random.seed(42)
rv = np.random.random_sample()
np.random.seed(42)
assert_equal(rv, self.instantiate_random_state(np.random))
np.random.seed(42)
assert_equal(rv, self.instantiate_np_random_state(np.random))
np.random.seed(42)
assert_equal(rv, self.instantiate_py_random_state(np.random))
def test_random_state_int(self):
np.random.seed(42)
np_rv = np.random.random_sample()
random.seed(42)
py_rv = random.random()
np.random.seed(42)
seed = 1
rval = self.instantiate_random_state(seed)
rval_expected = np.random.RandomState(seed).rand()
assert_true(rval, rval_expected)
rval = self.instantiate_np_random_state(seed)
rval_expected = np.random.RandomState(seed).rand()
assert_true(rval, rval_expected)
# test that global seed wasn't changed in function
assert_equal(np_rv, np.random.random_sample())
random.seed(42)
rval = self.instantiate_py_random_state(seed)
rval_expected = random.Random(seed).random()
assert_true(rval, rval_expected)
# test that global seed wasn't changed in function
assert_equal(py_rv, random.random())
def test_random_state_np_random_RandomState(self):
np.random.seed(42)
np_rv = np.random.random_sample()
np.random.seed(42)
seed = 1
rng = np.random.RandomState(seed)
rval = self.instantiate_random_state(rng)
rval_expected = np.random.RandomState(seed).rand()
assert_true(rval, rval_expected)
rval = self.instantiate_np_random_state(seed)
rval_expected = np.random.RandomState(seed).rand()
assert_true(rval, rval_expected)
rval = self.instantiate_py_random_state(seed)
rval_expected = np.random.RandomState(seed).rand()
assert_true(rval, rval_expected)
# test that global seed wasn't changed in function
assert_equal(np_rv, np.random.random_sample())
def test_random_state_py_random(self):
seed = 1
rng = random.Random(seed)
rv = self.instantiate_py_random_state(rng)
assert_true(rv, random.Random(seed).random())
assert_raises(ValueError, self.instantiate_random_state, rng)
assert_raises(ValueError, self.instantiate_np_random_state, rng)
@raises(nx.NetworkXError)
def test_random_state_string_arg_index():
@random_state('a')
def make_random_state(rs):
pass
rstate = make_random_state(1)
@raises(nx.NetworkXError)
def test_py_random_state_string_arg_index():
@py_random_state('a')
def make_random_state(rs):
pass
rstate = make_random_state(1)
@raises(nx.NetworkXError)
def test_random_state_invalid_arg_index():
@random_state(2)
def make_random_state(rs):
pass
rstate = make_random_state(1)
@raises(nx.NetworkXError)
def test_py_random_state_invalid_arg_index():
@py_random_state(2)
def make_random_state(rs):
pass
rstate = make_random_state(1)
|
d1a420cad3400ad805676037c38aa575a25833e8
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Utilities/Maintenance/vtk_reindent_code.py
|
5ac86ee88d3b7a381f45762c70a165aefd6d4084
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 16,384
|
py
|
vtk_reindent_code.py
|
#!/usr/bin/env python
"""
Usage: python vtk_reindent_code.py [--test] <file1> [<file2> ...]
This script takes old-style "Whitesmiths" indented VTK source files as
input, and re-indents the braces according to the new VTK style.
Only the brace indentation is modified.
If called with the --test option, then it will print an error message
for each file that it would modify, but it will not actually modify the
files.
Written by David Gobbi on Sep 30, 2015.
"""
import sys
import os
import re
def reindent(filename, dry_run=False):
"""Reindent a file from Whitesmiths style to Allman style"""
# The first part of this function clears all strings and comments
# where non-grammatical braces might be hiding. These changes will
# not be saved back to the file, they just simplify the parsing.
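    # For example, a line such as
    #     printf("hello {"); /* stray { in a comment */
    # is reduced, in this temporary copy only, to
    #     printf("");
    # so braces hiding inside strings and comments are never counted.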
# look for ', ", /*, and //
keychar = re.compile(r"""[/"']""")
# comments of the form /* */
c_comment = re.compile(r"\/\*(\*(?!\/)|[^*])*\*\/")
c_comment_start = re.compile(r"\/\*(\*(?!\/)|[^*])*$")
c_comment_end = re.compile(r"^(\*(?!\/)|[^*])*\*\/")
# comments of the form //
cpp_comment = re.compile(r"\/\/.*")
# string literals ""
string_literal = re.compile(r'"([^\\"]|\\.)*"')
string_literal_start = re.compile(r'"([^\\"]|\\.)*\\$')
string_literal_end = re.compile(r'^([^\\"]|\\.)*"')
# character literals ''
char_literal = re.compile(r"'([^\\']|\\.)*'")
char_literal_start = re.compile(r"'([^\\']|\\.)*\\$")
char_literal_end = re.compile(r"^([^\\']|\\.)*'")
# read the file
try:
f = open(filename)
lines = f.readlines()
f.close()
except:
sys.stderr.write(filename + ": ")
sys.stderr.write(str(sys.exc_info()[1]) + "\n")
sys.exit(1)
# convert strings to "", char constants to '', and remove comments
n = len(lines) # 'lines' is the input
newlines = [] # 'newlines' is the output
cont = None # set if e.g. we found /* and we are looking for */
for i in range(n):
line = lines[i].rstrip()
if cont is not None:
# look for closing ' or " or */
match = cont.match(line)
if match:
# found closing ' or " or */
line = line[match.end():]
cont = None
else:
# this whole line is in the middle of a string or comment
if cont is c_comment_end:
# still looking for */, clear the whole line
newlines.append("")
continue
else:
# still looking for ' or ", set line to backslash
newlines.append('\\')
continue
# start at column 0 and search for ', ", /*, or //
pos = 0
while True:
match = keychar.search(line, pos)
if match is None:
break
pos = match.start()
end = match.end()
# was the match /* ... */ ?
match = c_comment.match(line, pos)
if match:
line = line[0:pos] + " " + line[match.end():]
pos += 1
continue
# does the line have /* ... without the */ ?
match = c_comment_start.match(line, pos)
if match:
if line[-1] == '\\':
line = line[0:pos] + ' \\'
else:
line = line[0:pos]
cont = c_comment_end
break
# does the line have // ?
match = cpp_comment.match(line, pos)
if match:
if line[-1] == '\\':
line = line[0:pos] + ' \\'
else:
line = line[0:pos]
break
# did we find "..." ?
match = string_literal.match(line, pos)
if match:
line = line[0:pos] + "\"\"" + line[match.end():]
pos += 2
continue
# did we find "... without the final " ?
match = string_literal_start.match(line, pos)
if match:
line = line[0:pos] + "\"\"\\"
cont = string_literal_end
break
# did we find '...' ?
match = char_literal.match(line, pos)
if match:
line = line[0:pos] + "\' \'" + line[match.end():]
pos += 3
continue
# did we find '... without the final ' ?
match = char_literal_start.match(line, pos)
if match:
line = line[0:pos] + "\' \'\\"
cont = char_literal_end
break
# if we got to here, we found / that wasn't /* or //
pos += 1
# strip any trailing whitespace!
newlines.append(line.rstrip())
# The second part of this function looks for braces in the simplified
# code that we wrote to "newlines" after removing the contents of all
# string literals, character literals, and comments.
# Whenever we encounter an opening brace, we push its position onto a
# stack. Whenever we encounter the matching closing brace, we indent
# the braces as a pair.
# For #if directives, we check whether there are mismatched braces
# within the conditional block, and if so, we print a warning and reset
# the stack to the depth that it had at the start of the block.
# For #define directives, we save the stack and then restart counting
# braces until the end of the #define. Then we restore the stack.
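    # Roughly, a Whitesmiths-style block such as
    #     if (x)
    #       {
    #       y = 1;
    #       }
    # has only its brace lines re-indented, giving
    #     if (x)
    #     {
    #       y = 1;
    #     }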
# all changes go through this function
lines_changed = {} # keeps track of each line that was changed
def changeline(i, newtext, lines_changed=lines_changed):
if newtext != lines[i]:
lines[i] = newtext
lines_changed[i] = newtext
# we push a tuple (delim, row, col, newcol) onto this stack whenever
# we find a {, (, or [ delimiter, this keeps track of where we found
# the delimiter and what column we want to move it to
stack = []
lastdepth = 0
# this is a superstack that allows us to save the entire stack when we
# enter into an #if conditional block
dstack = []
# these are syntactic elements we need to look for
directive = re.compile(r"\s*#\s*(..)")
label = re.compile(r"""(case(?!\w)([^:]|::)+|\w+\s*(::\s*)*\s*:(?!:))""")
cflow = re.compile(r"(if|else|for|do|while|switch)(?!\w)")
delims = re.compile(r"[{}()\[\];]")
spaces = re.compile(r"\s*")
other = re.compile(r"(\w+|[^{}()\[\];\w\s]+)\s*")
cplusplus = re.compile(r"\s*#\s*ifdef\s+__cplusplus")
indentation = 0 # current indentation column
continuation = False # true if line continues an unfinished statement
new_context = True # also set when we enter a #define statement
in_else = False # set if in an #else
in_define = False # set if in #define
in_assign = False # set to deal with "= {" or #define x {"
leaving_define = False # set if at the end of a #define
save_stack = None # save stack when entering a #define
for i in range(n):
line = newlines[i]
# restore stack when leaving #define
if leaving_define:
stack, indentation, continuation = save_stack
save_stack = None
in_define = False
leaving_define = False
# handle #if conditionals
is_directive = False
in_else = False
match = directive.match(line)
if match:
is_directive = True
if match.groups()[0] == 'if':
dstack.append((list(stack), indentation, continuation,
line))
elif match.groups()[0] in ('en', 'el'):
oldstack, oldindent, oldcont, dline = dstack.pop()
if len(stack) > len(oldstack) and not cplusplus.match(dline):
sys.stderr.write(filename + ":" + str(i) + ": ")
sys.stderr.write("mismatched delimiter in \"" +
dline + "\" block\n")
if match.groups()[0] == 'el':
in_else = True
indentation = oldindent
continuation = oldcont
stack = oldstack
dstack.append((list(stack), indentation, continuation,
line))
elif match.groups()[0] == 'de':
in_define = True
leaving_define = False
save_stack = (stack, indentation, continuation)
stack = []
new_context = True
# remove backslash at end of line, if present
if len(line) > 0 and line[-1] == '\\':
line = line[0:-1].rstrip()
elif in_define:
leaving_define = True
if not is_directive and len(line) > 0 and not continuation:
# what is the indentation of the current line?
match = spaces.match(line)
if not line[match.end()] == '{':
indentation = match.end()
continuation = True
# new_context marks beginning of a file or a macro
if new_context:
continuation = False
indentation = 0
new_context = False
# skip initial whitespace
if is_directive:
pos = directive.match(line).end()
else:
pos = spaces.match(line).end()
# check for a label e.g. case
match = label.match(line, pos)
if match:
base = True
for item in stack:
if item[0] != '{':
base = False
if base:
word = re.match(r"\w*", match.group())
if word in ("case", "default"):
indentation = pos
continuation = False
# check for multiple labels on the same line
while match:
pos = spaces.match(line, match.end()).end()
match = label.match(line, pos)
# parse the line
while pos != len(line):
# check for if, else, for, while, do, switch
match = cflow.match(line, pos)
if match:
# if we are at the beginning of the line
if spaces.match(line).end() == pos:
indentation = pos
pos = spaces.match(line, match.end()).end()
continue
# check for a delimiter {} () [] or ;
match = delims.match(line, pos)
if not match:
# check for any other identifiers, operators
match = other.match(line, pos)
if match:
pos = match.end()
continue
else:
break
# found a delimiter
delim = line[pos]
if delim in ('(', '['):
# save delim, row, col, and current indentation
stack.append((delim, i, pos, indentation))
elif delim == '{':
if in_assign or line[0:pos-1].rstrip()[-1:] == "=":
# do not adjust braces for initializer lists
stack.append((delim, i, -1, indentation))
elif ((in_else or in_define) and spaces.sub("", line) == "{"):
# for opening braces that might have no match
indent = " "*indentation
changeline(i, spaces.sub(indent, lines[i], count=1))
stack.append((delim, i, pos, indentation))
else:
# save delim, row, col, and previous indentation
stack.append((delim, i, pos, indentation))
if spaces.sub("", newlines[i][0:pos]) == "":
indentation += 2
continuation = False
elif delim == ';':
# ';' marks end of statement unless inside for (;;)
if len(stack) == 0 or stack[-1][0] == '{':
continuation = False
else:
# found a ')', ']', or '}' delimiter, so pop its partner
try:
ldelim, j, k, indentation = stack.pop()
in_assign = (k < 0)
except IndexError:
ldelim = ""
if ldelim != {'}':'{', ')':'(', ']':'['}[delim]:
sys.stderr.write(filename + ":" + str(i) + ": ")
sys.stderr.write("mismatched \'" + delim + "\'\n")
# adjust the indentation of matching '{', '}'
if (ldelim == '{' and delim == '}' and not in_assign and
spaces.sub("", line[0:pos]) == ""):
if spaces.sub("", newlines[j][0:k]) == "":
indent = " "*indentation
changeline(j, spaces.sub(indent, lines[j], count=1))
changeline(i, spaces.sub(indent, lines[i], count=1))
elif i != j:
indent = " "*indentation
changeline(i, spaces.sub(indent, lines[i], count=1))
if delim == '}':
continuation = False
# eat whitespace and continue
pos = spaces.match(line, match.end()).end()
# check for " = " and #define assignments for the sake of
# the { inializer list } that might be on the following line
if len(line) > 0:
if (line[-1] == '=' or
(is_directive and in_define and not leaving_define)):
in_assign = True
elif not is_directive:
in_assign = False
if len(dstack) != 0:
sys.stderr.write(filename + ": ")
sys.stderr.write("mismatched #if conditional.\n")
if len(stack) != 0:
sys.stderr.write(filename + ":" + str(stack[0][1]) + ": ")
sys.stderr.write("no match for " + stack[0][0] +
" before end of file.\n")
if lines_changed:
# remove any trailing whitespace
trailing = re.compile(r" *$")
for i in range(n):
lines[i] = trailing.sub("", lines[i])
while n > 0 and lines[n-1].rstrip() == "":
n -= 1
if dry_run:
errcount = len(lines_changed)
line_numbers = list(lines_changed.keys())
line_numbers.sort()
line_numbers = [str(l + 1) for l in line_numbers[0:10] ]
if errcount > len(line_numbers):
line_numbers.append("...")
sys.stderr.write("Warning: " + filename +
": incorrect brace indentation on " +
str(errcount) +
(" lines: ", "line: ")[errcount == 1] +
", ".join(line_numbers) + "\n")
else:
# rewrite the file
ofile = open(filename, 'w')
ofile.writelines(lines)
ofile.close()
return True
return False
if __name__ == "__main__":
# ignore generated files
ignorefiles = ["lex.yy.c", "vtkParse.tab.c"]
files = []
opt_ignore = False # ignore all further options
opt_test = False # the --test option
for arg in sys.argv[1:]:
if arg[0:1] == '-' and not opt_ignore:
if arg == '--':
opt_ignore = True
elif arg == '--test':
opt_test = True
else:
sys.stderr.write("%s: unrecognized option %s\n" %
(os.path.split(sys.argv[0])[-1], arg))
sys.exit(1)
elif os.path.split(arg)[-1] not in ignorefiles:
files.append(arg)
# if --test was set, whenever a file needs modification, we set
# "failed" and continue checking the rest of the files
failed = False
for filename in files:
# repeat until no further changes occur
while reindent(filename, dry_run=opt_test):
if opt_test:
failed = True
break
if failed:
sys.exit(1)
|
a5ef5cbec509c68ba16227ba7de7a67690c4f72d
|
542ce6e738757d34fdea675349cbe1a0f975662b
|
/benchscripts/benchstat2
|
45f8fa05c94ae084eb7c9233b20e67605f178c9a
|
[
"BSD-3-Clause"
] |
permissive
|
aclements/go-misc
|
7f598ef0aa3720dffb7ac02541ae2d3199b47033
|
dbc22aabbcfb1e8d54725a10315fa16f7054ed48
|
refs/heads/master
| 2023-08-29T05:33:45.473551
| 2023-08-21T21:01:50
| 2023-08-21T21:03:38
| 34,076,292
| 230
| 23
|
BSD-3-Clause
| 2023-03-07T02:25:27
| 2015-04-16T19:33:13
|
Go
|
UTF-8
|
Python
| false
| false
| 3,906
|
benchstat2
|
#!/usr/bin/python3
import os
import sys
import tempfile
import subprocess
import argparse
import re
def expandHash(commits, h):
x = None
for c in commits:
if c.startswith(h):
            if x is not None:
raise ValueError("ambiguous commit hash " + h)
x = c
return x
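# For example (hypothetical hashes), expandHash(["4afd28", "dbc22a"], "4af") returns
# "4afd28"; an abbreviation that matches more than one commit raises ValueError, and
# one that matches nothing returns None.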
def main():
parser = argparse.ArgumentParser(description="disentangle benchmark output")
parser.add_argument("-C", metavar="gitdir", help="git repo for resolving commit hashes", default=os.path.expanduser("~/go.dev"))
parser.add_argument("-o", metavar="base", help="write output to base-commit.log instead of invoking benchstat")
parser.add_argument("-benchsave", action="store_true", help="invoke benchsave instead of benchstat")
parser.add_argument("-geomean", action="store_true", help="pass -geomean to benchstat")
parser.add_argument("-delta-test", help="pass -delta-test to benchstat")
parser.add_argument("logs", nargs="+", help="input benchmark log files")
parser.add_argument("commits", nargs="*", help="commits to show")
args = parser.parse_args()
    benchstat = args.o is None
if benchstat:
tmpdir = tempfile.TemporaryDirectory()
args.o = os.path.join(tmpdir.name, "out")
# Separate logs and commits arguments
for i, arg in enumerate(args.logs):
if re.fullmatch("[0-9a-fA-F]{5,}", arg):
args.commits = args.logs[i:]
args.logs = args.logs[:i]
break
if arg == "--":
args.commits = args.logs[i+1:]
args.logs = args.logs[:i]
break
# Process input files into output files
fmap = {}
logCommits = set()
for inp in args.logs:
parseInput(inp, args.o, fmap, logCommits)
for f, name in fmap.values():
f.close()
# Get commit order
listArgs = [list(logCommits)]
if args.commits:
# We want to accept revision list arguments, but keep things
# in argument order if there's more than one argument. This
# means we have to call rev-list separately for each argument.
listArgs = [["--no-walk", c] for c in args.commits]
commits = []
for listArg in listArgs:
commits += subprocess.check_output(["git", "-C", args.C, "rev-list", "--topo-order", "--reverse"] + listArg, universal_newlines=True).splitlines()
order = {cid: i for i, cid in enumerate(commits)}
# Get names in commit order.
if args.commits:
names = [args.o + "-" + expandHash(commits, h)[:10] + ".log" for h in commits]
else:
names = [fmap[cid][1]
for cid in sorted(fmap.keys(), key=lambda cid: order[cid])]
if benchstat:
# Invoke benchstat/benchsave
try:
os.chdir(os.path.dirname(args.o))
if args.benchsave:
benchargs = ["benchsave"]
else:
benchargs = ["benchstat"]
if args.geomean:
benchargs.append("-geomean")
if args.delta_test:
benchargs.extend(["-delta-test", args.delta_test])
subprocess.check_call(benchargs + list(map(os.path.basename, names)),
stdout=sys.stdout, stderr=sys.stderr)
finally:
# Allow deletion of temporary directory.
os.chdir("/")
else:
print(" ".join(names))
def parseInput(path, outbase, fmap, logCommits):
infile = open(path)
outfile = None
f = None
for l in infile:
if l.startswith("commit: "):
chash = l.split()[1].strip()
logCommits.add(chash)
f, name = fmap.get(chash, (None, None))
if f is None:
name = outbase + "-" + chash[:10] + ".log"
f = open(name, "w")
fmap[chash] = (f, name)
elif f:
f.write(l)
main()
|
|
4afd285af61b029cb7192211ab749f53576dc6d1
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/区间问题/区间操作/贪心/906_区间分组-小根堆.py
|
64bb9c325ae1f8cc642230c6e0720abda50f5782
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
906_区间分组-小根堆.py
|
# 1. Sort the intervals by left endpoint in ascending order
# 2. Scan the intervals and check whether each one can be placed into an existing group
# Given N closed intervals [ai, bi],
# split them into groups so that, within each group, no two intervals
# intersect (endpoints included),
# and make the number of groups as small as possible.
# Output the minimum number of groups.
# !n <= 1e4
# !start < end < 1e9
#######################################################################
# Solution:
# !Sort the intervals by left endpoint
# A min-heap stores the right endpoint of every group; the top is the smallest right endpoint
# If the current interval's left endpoint is greater than the heap top, it can join that group; push its right endpoint
# If the current interval's left endpoint is <= the heap top, it overlaps the intervals already in the heap, so a new group is needed
# !Can also be solved with a difference array:
# the "meeting rooms" problem
# https://leetcode.cn/problems/meeting-rooms-ii/
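# Small worked example (hypothetical input): intervals = [[1, 3], [2, 5], [4, 7]]
#   [1, 3]: heap empty      -> push 3                    -> [3]
#   [2, 5]: 2 <= heap top 3 -> new group, push 5         -> [3, 5]
#   [4, 7]: 4 >  heap top 3 -> reuse group: pop 3, push 7 -> [5, 7]
# The final heap size, 2, is the minimum number of groups.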
from collections import defaultdict
from heapq import heappop, heappush
from itertools import accumulate
from typing import List
class Solution:
def minGroups(self, intervals: List[List[int]]) -> int:
intervals.sort()
pq = []
for start, end in intervals:
if pq and start > pq[0]:
heappop(pq)
            heappush(pq, end)  # update the right endpoint of this group
return len(pq)
def minGroups2(self, intervals: List[List[int]]) -> int:
"""会议室系列 差分更新"""
diff = defaultdict(int)
for start, end in intervals:
diff[start] += 1
diff[end + 1] -= 1
nums = [diff[key] for key in sorted(diff)]
return max(accumulate(nums))
|
9590cdbc7650897d64d6f9464cc2a19366276756
|
5d55e0885bacd718588f25b71675c1127c93fc0a
|
/river/time_series/holt_winters.py
|
52241be66a9a03c4570c4b6db4085fd932e02756
|
[
"BSD-3-Clause"
] |
permissive
|
online-ml/river
|
5698b60e65493eba28d0c0c1992f19eb996c0bfa
|
c658393084ed4147a782daa6bcd4a467c3abb0cb
|
refs/heads/main
| 2023-09-03T00:12:55.121301
| 2023-08-29T12:04:20
| 2023-08-29T12:04:20
| 167,388,434
| 3,372
| 389
|
BSD-3-Clause
| 2023-09-12T08:11:15
| 2019-01-24T15:18:26
|
Python
|
UTF-8
|
Python
| false
| false
| 6,681
|
py
|
holt_winters.py
|
from __future__ import annotations
import operator
import statistics
from collections import deque
from river import time_series
__all__ = ["HoltWinters"]
class Component(deque):
...
class AdditiveLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y - (season[-season.seasonality] if season else 0))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class MultiplicativeLevel(Component):
def __init__(self, alpha):
super().__init__([], maxlen=2)
self.alpha = alpha
def update(self, y, trend, season):
self.append(
self.alpha * (y / (season[-season.seasonality] if season else 1))
+ (1 - self.alpha) * (self[-1] + (trend[-1] if trend else 0))
)
class Trend(Component):
def __init__(self, beta):
super().__init__([], maxlen=2)
self.beta = beta
def update(self, y, level):
self.append(self.beta * (level[-1] - level[-2]) + (1 - self.beta) * self[-1])
class AdditiveSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * (y - level[-2] - trend[-2]) + (1 - self.gamma) * self[-self.seasonality]
)
class MultiplicativeSeason(Component):
def __init__(self, gamma, seasonality):
super().__init__([], maxlen=seasonality + 1)
self.gamma = gamma
self.seasonality = seasonality
def update(self, y, level, trend):
self.append(
self.gamma * y / (level[-2] + trend[-2]) + (1 - self.gamma) * self[-self.seasonality]
)
class HoltWinters(time_series.base.Forecaster):
r"""Holt-Winters forecaster.
This is a standard implementation of the Holt-Winters forecasting method. Certain
parametrisations result in special cases, such as simple exponential smoothing.
Optimal parameters and initialisation values can be determined in a batch setting. However, in
an online setting, it is necessary to wait and observe enough values. The first
`k = max(2, seasonality)` values are indeed used to initialize the components.
**Level initialization**
    $$l = \frac{1}{k} \sum_{i=1}^{k} y_i$$
    **Trend initialization**
    $$t = \frac{1}{k - 1} \sum_{i=2}^{k} (y_i - y_{i-1})$$
    **Season initialization**
    $$s_i = \frac{y_i}{l}$$
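    For instance, with `seasonality=0` and first observations 10 and 14 (so `k = 2`),
    the level initialises to 12 and, when `beta` is set, the trend initialises to 4.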
Parameters
----------
alpha
Smoothing parameter for the level.
beta
Smoothing parameter for the trend.
gamma
Smoothing parameter for the seasonality.
seasonality
        The number of periods in a season. For instance, this should be 4 for quarterly data,
        and 12 for monthly data with a yearly seasonal pattern.
multiplicative
Whether or not to use a multiplicative formulation.
Examples
--------
>>> from river import datasets
>>> from river import metrics
>>> from river import time_series
>>> dataset = datasets.AirlinePassengers()
>>> model = time_series.HoltWinters(
... alpha=0.3,
... beta=0.1,
... gamma=0.6,
... seasonality=12,
... multiplicative=True
... )
>>> metric = metrics.MAE()
>>> time_series.evaluate(
... dataset,
... model,
... metric,
... horizon=12
... )
+1 MAE: 25.899087
+2 MAE: 26.26131
+3 MAE: 25.735903
+4 MAE: 25.625678
+5 MAE: 26.093842
+6 MAE: 26.90249
+7 MAE: 28.634398
+8 MAE: 29.284769
+9 MAE: 31.018351
+10 MAE: 32.252349
+11 MAE: 33.518946
+12 MAE: 33.975057
References
----------
[^1]: [Exponential smoothing — Wikipedia](https://www.wikiwand.com/en/Exponential_smoothing)
[^2]: [Exponential smoothing — Forecasting: Principles and Practice](https://otexts.com/fpp2/expsmooth.html)
[^3]: [What is Exponential Smoothing? — Engineering statistics handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc43.htm)
"""
def __init__(
self,
alpha,
beta=None,
gamma=None,
seasonality=0,
multiplicative=False,
):
if seasonality and gamma is None:
raise ValueError("gamma must be set if seasonality is set")
if gamma and beta is None:
raise ValueError("beta must be set if gamma is set")
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.seasonality = seasonality
self.multiplicative = multiplicative
self.level = MultiplicativeLevel(alpha) if multiplicative else AdditiveLevel(alpha)
self.trend = Trend(beta) if beta else None
self.season = (
(
MultiplicativeSeason(gamma, seasonality)
if multiplicative
else AdditiveSeason(gamma, seasonality)
)
if seasonality
else None
)
self._first_values = []
self._initialized = False
def learn_one(self, y, x=None):
if self._initialized:
self.level.update(y, self.trend, self.season)
if self.trend is not None:
self.trend.update(y, self.level)
if self.season is not None:
self.season.update(y, self.level, self.trend)
return self
self._first_values.append(y)
if len(self._first_values) < max(2, self.seasonality):
return self
# The components can be initialized now that enough values have been observed
self.level.append(statistics.mean(self._first_values))
diffs = [b - a for a, b in zip(self._first_values[:-1], self._first_values[1:])]
if self.trend is not None:
self.trend.append(statistics.mean(diffs))
if self.season is not None:
self.season.extend([y / self.level[-1] for y in self._first_values])
self._initialized = True
return self
def forecast(self, horizon, xs=None):
op = operator.mul if self.multiplicative else operator.add
return [
op(
self.level[-1] + ((h + 1) * self.trend[-1] if self.trend else 0),
(
self.season[-self.seasonality + h % self.seasonality]
if self.season
else (1 if self.multiplicative else 0)
),
)
for h in range(horizon)
]
|
35b8745b891aca0c5c950b43dac45e85a23ea049
|
f54739ec8ca9a9012eefda5c4759a62db3fef3c2
|
/src/api-engine/api/utils/__init__.py
|
b5bc027672d08aacae133e1b1fbce4ca0373979c
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
hyperledger/cello
|
6e615ab0df9724262ef6c2028d45f2f642254fe2
|
cb4d24347228ad9d1ae24cd0d6188bf29b1b8cbe
|
refs/heads/main
| 2023-09-03T15:33:35.844553
| 2023-08-29T03:47:41
| 2023-08-29T03:47:41
| 78,610,786
| 957
| 559
|
Apache-2.0
| 2023-09-12T00:53:55
| 2017-01-11T06:47:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
__init__.py
|
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework.views import exception_handler
from rest_framework.exceptions import ValidationError, ParseError
from api.common.enums import ErrorCode
from rest_framework import status
from rest_framework.exceptions import ErrorDetail
from .common import zip_dir, zip_file
from api.common import ok, err
LOG = logging.getLogger(__name__)
def custom_exception_handler(exc, context):
response = exception_handler(exc, context)
if response is not None:
if (
response.status_code == status.HTTP_400_BAD_REQUEST
and isinstance(response.data, dict)
and "code" not in response.data
):
if isinstance(exc, ValidationError):
response.data["code"] = ErrorCode.ValidationError.value
response.data[
"detail"
] = ErrorCode.ValidationError.display_string
elif isinstance(exc, ParseError):
response.data["code"] = ErrorCode.ParseError.value
response.data["detail"] = ErrorCode.ParseError.display_string
elif isinstance(response.data.get("detail"), ErrorDetail):
# response.data["code"] = response.data.get("detail").code
response.data = err(response.data.get("detail"))
else:
response.data["code"] = ErrorCode.Unknown.value
response.data["detail"] = ErrorCode.Unknown.display_string
return response
|
161fb43e3678581ca3348cd87616383a7b58d331
|
6b6d42eadf53e90b08ce564fb188a9a4b126ef12
|
/testsuite/python_support/utils.py
|
681016d930472964c2c159fc56247b158ba74a37
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA"
] |
permissive
|
AdaCore/libadalang
|
f97b95d1672cb1e5083c49ee632c6f9c787d36c2
|
50d658afa70ccbf46b8f7d9d43a21d45d56b206c
|
refs/heads/master
| 2023-09-01T18:34:26.976692
| 2023-08-25T15:53:43
| 2023-08-25T15:53:43
| 47,627,172
| 158
| 49
|
Apache-2.0
| 2022-12-14T10:29:45
| 2015-12-08T14:28:22
|
Ada
|
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
utils.py
|
import os
import os.path
import pipes
import re
import subprocess
LAL_ROOTDIR = os.path.abspath(os.environ['LIBADALANG_ROOTDIR'])
LAL_DISABLE_SHARED = bool(int(os.environ['LIBADALANG_DISABLE_SHARED']))
LAL_BUILD_MODE = os.environ['LIBADALANG_BUILD_MODE'] or "dev"
DIRECTORY_MISSING_RE = re.compile(
r'.*\.gpr:\d+:\d+: warning:'
r' \w+ directory ".*" (not found|does not exist)'
)
# Arguments to pass to GPR tools in order to process project files involving
# libadalang.gpr/langkit_support.gpr.
LIBRARY_KIND = 'static' if LAL_DISABLE_SHARED else 'relocatable'
GPR_ARGS = [
'-XLIBRARY_TYPE={}'.format(LIBRARY_KIND),
'-XXMLADA_BUILD={}'.format(LIBRARY_KIND),
'-XBUILD_MODE={}'.format(LAL_BUILD_MODE),
# Make sure GPRbuild does not try to rebuild Libadalang, as this will break
# other tests running in parallel.
'-XLIBADALANG_EXTERNALLY_BUILT=true',
]
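# With the defaults above, the gprbuild() helper below ends up running something like:
#   gprbuild -P foo.gpr -q -p -XLIBRARY_TYPE=relocatable -XXMLADA_BUILD=relocatable \
#       -XBUILD_MODE=dev -XLIBADALANG_EXTERNALLY_BUILT=true
# (the library kind and build mode depend on the environment variables read above,
# and "foo.gpr" stands in for the project file passed by the caller).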
def in_contrib(*args):
"""
Return a path under the "contrib" subdir in the top of the repository.
"""
return os.path.join(LAL_ROOTDIR, 'contrib', *args)
def gprbuild(project_file):
"""
Invoke gprbuild on the given project file.
This passes all the command-line arguments that are required to build a
project that depends on Libadalang.
"""
subprocess.check_call(
['gprbuild', '-P', project_file, '-q', '-p'] + GPR_ARGS
)
def run_nameres(args):
"""
Run the name resolution program with the given arguments.
If it exits with a non-zero status code, print an error message, display
its output and stop. Otherwise, display its output with warnings about
missing directories filtered out.
:param list[str] args: Arguments to pass to nameres.
"""
argv = ['nameres'] + args
p = subprocess.Popen(argv, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='ascii')
stdout, _ = p.communicate()
if p.returncode:
        print('nameres exited with status code {}'.format(p.returncode))
print('Command line was:', ' '.join(pipes.quote(a) for a in argv))
print('Output was:')
print('')
print(stdout)
return
for line in stdout.splitlines():
line = line.strip()
if not DIRECTORY_MISSING_RE.match(line):
print(line)
|
029b49223debc02bb943115d5d87989f9de63bff
|
0ba2e5061577f6286ff9265ef1df9aca96769445
|
/data_structures/Graphs/graphsearch/graph_coloring/python/graph_color_greedy.py
|
375d3918902001b208be86022cfda0174a0c4228
|
[
"CC0-1.0"
] |
permissive
|
ZoranPandovski/al-go-rithms
|
68d5d02f80a61de9baf8e50a81a52e7d0b3983a0
|
4ae6ba54e90af14af236e03e435eb0402dcac787
|
refs/heads/master
| 2023-09-04T16:04:04.321676
| 2023-06-06T15:22:16
| 2023-06-06T15:22:16
| 93,438,176
| 1,421
| 2,445
|
CC0-1.0
| 2023-06-15T14:24:28
| 2017-06-05T19:20:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 743
|
py
|
graph_color_greedy.py
|
from collections import defaultdict
class Graph:
    def __init__(self, V, directed=False):
        self.V = V
        self.directed = directed
        self.graph = defaultdict(list)
    def add_edge(self, a, b):
        self.graph[a].append(b)
        if not self.directed:
            self.graph[b].append(a)
    def color_greedy(self):
        """Greedy coloring: give each vertex the smallest color not used by its neighbours."""
        result = [-1] * self.V
        max_color = 0
        for v, adj in self.graph.items():
            # smallest color not already taken by a coloured neighbour
            color = 0
            while color in [result[x] for x in adj]:
                color += 1
            max_color = max(max_color, color)
            result[v] = color
        return result, max_color
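# Trace on the demo graph below (vertices visited in insertion order 0..4):
# the colors come out as [0, 1, 2, 0, 1], and max_color == 2 is the highest color
# index used, i.e. three distinct colors in total.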
if __name__ == "__main__":
g = Graph(5)
g.add_edge(0,1)
g.add_edge(0,2)
g.add_edge(1,2)
g.add_edge(1,3)
g.add_edge(2,3)
g.add_edge(3,4)
res,m = g.color_greedy()
print("max colors: {} list: {}".format(m,res))
|
b72d3c207323817ed08d925618bc4a569b1d52b6
|
bf5acb19d44d031e2d8a9e37266acd55c5697863
|
/pybamm/parameters/parameter_values.py
|
c78cb70c91c9cf16cca2118e0411a1fbac1b312b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
pybamm-team/PyBaMM
|
82ecf9bebb580aab1a4e67aa7d0297d2698a0b51
|
b4432b6da7331f992b1831912a9cf89be1f7578f
|
refs/heads/develop
| 2023-08-19T04:29:21.151964
| 2023-08-18T22:43:38
| 2023-08-18T22:43:38
| 155,538,761
| 713
| 362
|
BSD-3-Clause
| 2023-09-14T18:20:04
| 2018-10-31T10:26:29
|
Python
|
UTF-8
|
Python
| false
| false
| 32,690
|
py
|
parameter_values.py
|
#
# Parameter values for a simulation
#
import numpy as np
import pybamm
import numbers
from pprint import pformat
from collections import defaultdict
class ParameterValues:
"""
The parameter values for a simulation.
Note that this class does not inherit directly from the python dictionary class as
this causes issues with saving and loading simulations.
Parameters
----------
values : dict or string
Explicit set of parameters, or reference to an inbuilt parameter set
If string and matches one of the inbuilt parameter sets, returns that parameter
set.
Examples
--------
>>> import pybamm
>>> values = {"some parameter": 1, "another parameter": 2}
>>> param = pybamm.ParameterValues(values)
>>> param["some parameter"]
1
>>> param = pybamm.ParameterValues("Marquis2019")
>>> param["Reference temperature [K]"]
298.15
"""
def __init__(self, values, chemistry=None):
if chemistry is not None:
raise ValueError(
"The 'chemistry' keyword argument has been deprecated. "
"Call `ParameterValues` with a dictionary dictionary of "
"parameter values, or the name of a parameter set (string), "
"as the single argument, e.g. `ParameterValues('Chen2020')`.",
)
# add physical constants as default values
self._dict_items = pybamm.FuzzyDict(
{
"Ideal gas constant [J.K-1.mol-1]": pybamm.constants.R.value,
"Faraday constant [C.mol-1]": pybamm.constants.F.value,
"Boltzmann constant [J.K-1]": pybamm.constants.k_b.value,
"Electron charge [C]": pybamm.constants.q_e.value,
}
)
if isinstance(values, (dict, ParameterValues)):
# remove the "chemistry" key if it exists
values.pop("chemistry", None)
self.update(values, check_already_exists=False)
else:
# Check if values is a named parameter set
if isinstance(values, str) and values in pybamm.parameter_sets.keys():
values = pybamm.parameter_sets[values]
values.pop("chemistry", None)
self.update(values, check_already_exists=False)
else:
raise ValueError("Invalid Parameter Value")
# Initialise empty _processed_symbols dict (for caching)
self._processed_symbols = {}
# save citations
if "citations" in self._dict_items:
for citation in self._dict_items["citations"]:
pybamm.citations.register(citation)
@staticmethod
def create_from_bpx(filename, target_soc=1):
"""
Parameters
----------
filename: str
The filename of the bpx file
target_soc : float, optional
Target state of charge. Must be between 0 and 1. Default is 1.
Returns
-------
ParameterValues
A parameter values object with the parameters in the bpx file
"""
if target_soc < 0 or target_soc > 1:
raise ValueError("Target SOC should be between 0 and 1")
from bpx import parse_bpx_file, get_electrode_concentrations
from .bpx import _bpx_to_param_dict
# parse bpx
bpx = parse_bpx_file(filename)
pybamm_dict = _bpx_to_param_dict(bpx)
if "Open-circuit voltage at 0% SOC [V]" not in pybamm_dict:
pybamm_dict["Open-circuit voltage at 0% SOC [V]"] = pybamm_dict[
"Lower voltage cut-off [V]"
]
pybamm_dict["Open-circuit voltage at 100% SOC [V]"] = pybamm_dict[
"Upper voltage cut-off [V]"
]
# probably should put a warning here to indicate we are going
# ahead with the low voltage limit.
# get initial concentrations based on SOC
c_n_init, c_p_init = get_electrode_concentrations(target_soc, bpx)
pybamm_dict["Initial concentration in negative electrode [mol.m-3]"] = c_n_init
pybamm_dict["Initial concentration in positive electrode [mol.m-3]"] = c_p_init
return pybamm.ParameterValues(pybamm_dict)
def __getitem__(self, key):
return self._dict_items[key]
def get(self, key, default=None):
"""Return item corresponding to key if it exists, otherwise return default"""
try:
return self._dict_items[key]
except KeyError:
return default
def __setitem__(self, key, value):
"""Call the update functionality when doing a setitem"""
self.update({key: value})
def __delitem__(self, key):
del self._dict_items[key]
def __repr__(self):
return pformat(self._dict_items, width=1)
def __eq__(self, other):
return self._dict_items == other._dict_items
def keys(self):
"""Get the keys of the dictionary"""
return self._dict_items.keys()
def values(self):
"""Get the values of the dictionary"""
return self._dict_items.values()
def items(self):
"""Get the items of the dictionary"""
return self._dict_items.items()
def pop(self, *args, **kwargs):
self._dict_items.pop(*args, **kwargs)
def copy(self):
"""Returns a copy of the parameter values. Makes sure to copy the internal
dictionary."""
new_copy = ParameterValues(self._dict_items.copy())
return new_copy
def search(self, key, print_values=True):
"""
Search dictionary for keys containing 'key'.
See :meth:`pybamm.FuzzyDict.search()`.
"""
return self._dict_items.search(key, print_values)
def update(self, values, check_conflict=False, check_already_exists=True, path=""):
"""
Update parameter dictionary, while also performing some basic checks.
Parameters
----------
values : dict
Dictionary of parameter values to update parameter dictionary with
check_conflict : bool, optional
Whether to check that a parameter in `values` has not already been defined
in the parameter class when updating it, and if so that its value does not
change. This is set to True during initialisation, when parameters are
combined from different sources, and is False by default otherwise
check_already_exists : bool, optional
Whether to check that a parameter in `values` already exists when trying to
update it. This is to avoid cases where an intended change in the parameters
            is ignored due to a typo in the parameter name, and is True by default but can
be manually overridden.
path : string, optional
Path from which to load functions
"""
# check if values is not a dictionary
if not isinstance(values, dict):
values = values._dict_items
# check parameter values
self.check_parameter_values(values)
# update
for name, value in values.items():
# check for conflicts
if (
check_conflict is True
and name in self.keys()
and not (self[name] == float(value) or self[name] == value)
):
raise ValueError(
"parameter '{}' already defined with value '{}'".format(
name, self[name]
)
)
# check parameter already exists (for updating parameters)
if check_already_exists is True:
try:
self._dict_items[name]
except KeyError as err:
raise KeyError(
"Cannot update parameter '{}' as it does not ".format(name)
+ "have a default value. ({}). If you are ".format(err.args[0])
+ "sure you want to update this parameter, use "
+ "param.update({{name: value}}, check_already_exists=False)"
)
# if no conflicts, update
if isinstance(value, str):
if (
value.startswith("[function]")
or value.startswith("[current data]")
or value.startswith("[data]")
or value.startswith("[2D data]")
):
raise ValueError(
"Specifying parameters via [function], [current data], [data] "
"or [2D data] is no longer supported. For functions, pass in a "
"python function object. For data, pass in a python function "
"that returns a pybamm Interpolant object. "
"See https://tinyurl.com/merv43ss for an example with both."
)
elif value == "[input]":
self._dict_items[name] = pybamm.InputParameter(name)
# Anything else should be a converted to a float
else:
self._dict_items[name] = float(value)
elif isinstance(value, tuple) and isinstance(value[1], np.ndarray):
# If data is provided as a 2-column array (1D data),
# convert to two arrays for compatibility with 2D data
# see #1805
func_name, data = value
data = ([data[:, 0]], data[:, 1])
self._dict_items[name] = (func_name, data)
else:
self._dict_items[name] = value
# reset processed symbols
self._processed_symbols = {}
def set_initial_stoichiometries(
self,
initial_value,
param=None,
known_value="cyclable lithium capacity",
inplace=True,
):
"""
Set the initial stoichiometry of each electrode, based on the initial
SOC or voltage
"""
param = param or pybamm.LithiumIonParameters()
x, y = pybamm.lithium_ion.get_initial_stoichiometries(
initial_value, self, param=param, known_value=known_value
)
if inplace:
parameter_values = self
else:
parameter_values = self.copy()
c_n_max = self.evaluate(param.n.prim.c_max)
c_p_max = self.evaluate(param.p.prim.c_max)
parameter_values.update(
{
"Initial concentration in negative electrode [mol.m-3]": x * c_n_max,
"Initial concentration in positive electrode [mol.m-3]": y * c_p_max,
}
)
return parameter_values
def check_parameter_values(self, values):
for param in values:
if "propotional term" in param:
raise ValueError(
f"The parameter '{param}' has been renamed to "
"'... proportional term [s-1]', and its value should now be divided"
"by 3600 to get the same results as before."
)
# specific check for renamed parameter "1 + dlnf/dlnc"
if "1 + dlnf/dlnc" in param:
raise ValueError(
f"parameter '{param}' has been renamed to " "'Thermodynamic factor'"
)
def process_model(self, unprocessed_model, inplace=True):
"""Assign parameter values to a model.
Currently inplace, could be changed to return a new model.
Parameters
----------
unprocessed_model : :class:`pybamm.BaseModel`
Model to assign parameter values for
inplace: bool, optional
If True, replace the parameters in the model in place. Otherwise, return a
new model with parameter values set. Default is True.
Raises
------
:class:`pybamm.ModelError`
If an empty model is passed (`model.rhs = {}` and `model.algebraic = {}` and
`model.variables = {}`)
"""
pybamm.logger.info(
"Start setting parameters for {}".format(unprocessed_model.name)
)
# set up inplace vs not inplace
if inplace:
# any changes to unprocessed_model attributes will change model attributes
# since they point to the same object
model = unprocessed_model
else:
# create a copy of the model
model = unprocessed_model.new_copy()
if (
len(unprocessed_model.rhs) == 0
and len(unprocessed_model.algebraic) == 0
and len(unprocessed_model.variables) == 0
):
raise pybamm.ModelError("Cannot process parameters for empty model")
new_rhs = {}
for variable, equation in unprocessed_model.rhs.items():
pybamm.logger.verbose(
"Processing parameters for {!r} (rhs)".format(variable)
)
new_variable = self.process_symbol(variable)
new_rhs[new_variable] = self.process_symbol(equation)
model.rhs = new_rhs
new_algebraic = {}
for variable, equation in unprocessed_model.algebraic.items():
pybamm.logger.verbose(
"Processing parameters for {!r} (algebraic)".format(variable)
)
new_variable = self.process_symbol(variable)
new_algebraic[new_variable] = self.process_symbol(equation)
model.algebraic = new_algebraic
new_initial_conditions = {}
for variable, equation in unprocessed_model.initial_conditions.items():
pybamm.logger.verbose(
"Processing parameters for {!r} (initial conditions)".format(variable)
)
new_variable = self.process_symbol(variable)
new_initial_conditions[new_variable] = self.process_symbol(equation)
model.initial_conditions = new_initial_conditions
model.boundary_conditions = self.process_boundary_conditions(unprocessed_model)
new_variables = {}
for variable, equation in unprocessed_model.variables.items():
pybamm.logger.verbose(
"Processing parameters for {!r} (variables)".format(variable)
)
new_variables[variable] = self.process_symbol(equation)
model.variables = new_variables
new_events = []
for event in unprocessed_model.events:
pybamm.logger.verbose(
"Processing parameters for event '{}''".format(event.name)
)
new_events.append(
pybamm.Event(
event.name, self.process_symbol(event.expression), event.event_type
)
)
interpolant_events = self._get_interpolant_events(model)
for event in interpolant_events:
pybamm.logger.verbose(
"Processing parameters for event '{}''".format(event.name)
)
new_events.append(
pybamm.Event(
event.name, self.process_symbol(event.expression), event.event_type
)
)
model.events = new_events
pybamm.logger.info("Finish setting parameters for {}".format(model.name))
return model
def _get_interpolant_events(self, model):
"""Add events for functions that have been defined as parameters"""
# Define events to catch extrapolation. In these events the sign is
# important: it should be positive inside of the range and negative
# outside of it
interpolants = model._find_symbols(pybamm.Interpolant)
interpolant_events = []
for interpolant in interpolants:
xs = interpolant.x
children = interpolant.children
for x, child in zip(xs, children):
interpolant_events.extend(
[
pybamm.Event(
f"Interpolant '{interpolant.name}' lower bound",
pybamm.min(child - min(x)),
pybamm.EventType.INTERPOLANT_EXTRAPOLATION,
),
pybamm.Event(
f"Interpolant '{interpolant.name}' upper bound",
pybamm.min(max(x) - child),
pybamm.EventType.INTERPOLANT_EXTRAPOLATION,
),
]
)
return interpolant_events
def process_boundary_conditions(self, model):
"""
Process boundary conditions for a model
Boundary conditions are dictionaries {"left": left bc, "right": right bc}
in general, but may be imposed on the tabs (or *not* on the tab) for a
small number of variables, e.g. {"negative tab": neg. tab bc,
"positive tab": pos. tab bc "no tab": no tab bc}.
"""
new_boundary_conditions = {}
sides = ["left", "right", "negative tab", "positive tab", "no tab"]
for variable, bcs in model.boundary_conditions.items():
processed_variable = self.process_symbol(variable)
new_boundary_conditions[processed_variable] = {}
for side in sides:
try:
bc, typ = bcs[side]
pybamm.logger.verbose(
"Processing parameters for {!r} ({} bc)".format(variable, side)
)
processed_bc = (self.process_symbol(bc), typ)
new_boundary_conditions[processed_variable][side] = processed_bc
except KeyError as err:
# don't raise error if the key error comes from the side not being
# found
if err.args[0] in side:
pass
# do raise error otherwise (e.g. can't process symbol)
else:
raise KeyError(err)
return new_boundary_conditions
def process_geometry(self, geometry):
"""
Assign parameter values to a geometry (inplace).
Parameters
----------
geometry : dict
Geometry specs to assign parameter values to
"""
def process_and_check(sym):
new_sym = self.process_symbol(sym)
if not isinstance(new_sym, pybamm.Scalar):
raise ValueError(
"Geometry parameters must be Scalars after parameter processing"
)
return new_sym
for domain in geometry:
for spatial_variable, spatial_limits in geometry[domain].items():
# process tab information if using 1 or 2D current collectors
if spatial_variable == "tabs":
for tab, position_size in spatial_limits.items():
for position_size, sym in position_size.items():
geometry[domain]["tabs"][tab][
position_size
] = process_and_check(sym)
else:
for lim, sym in spatial_limits.items():
geometry[domain][spatial_variable][lim] = process_and_check(sym)
def process_symbol(self, symbol):
"""Walk through the symbol and replace any Parameter with a Value.
If a symbol has already been processed, the stored value is returned.
Parameters
----------
symbol : :class:`pybamm.Symbol`
Symbol or Expression tree to set parameters for
Returns
-------
symbol : :class:`pybamm.Symbol`
Symbol with Parameter instances replaced by Value
"""
try:
return self._processed_symbols[symbol]
except KeyError:
processed_symbol = self._process_symbol(symbol)
self._processed_symbols[symbol] = processed_symbol
return processed_symbol
def _process_symbol(self, symbol):
"""See :meth:`ParameterValues.process_symbol()`."""
if isinstance(symbol, pybamm.Parameter):
value = self[symbol.name]
if isinstance(value, numbers.Number):
# Check not NaN (parameter in csv file but no value given)
if np.isnan(value):
raise ValueError(f"Parameter '{symbol.name}' not found")
# Scalar inherits name
return pybamm.Scalar(value, name=symbol.name)
elif isinstance(value, pybamm.Symbol):
new_value = self.process_symbol(value)
new_value.copy_domains(symbol)
return new_value
else:
raise TypeError("Cannot process parameter '{}'".format(value))
elif isinstance(symbol, pybamm.FunctionParameter):
function_name = self[symbol.name]
if isinstance(
function_name,
(numbers.Number, pybamm.Interpolant, pybamm.InputParameter),
) or (
isinstance(function_name, pybamm.Symbol)
and function_name.size_for_testing == 1
):
# no need to process children, they will only be used for shape
new_children = symbol.children
else:
# process children
new_children = []
for child in symbol.children:
if symbol.diff_variable is not None and any(
x == symbol.diff_variable for x in child.pre_order()
):
# Wrap with NotConstant to avoid simplification,
# which would stop symbolic diff from working properly
new_child = pybamm.NotConstant(child)
new_children.append(self.process_symbol(new_child))
else:
new_children.append(self.process_symbol(child))
# Create Function or Interpolant or Scalar object
if isinstance(function_name, tuple):
if len(function_name) == 2: # CSV or JSON parsed data
# to create an Interpolant
name, data = function_name
if len(data[0]) == 1:
input_data = data[0][0], data[1]
else:
input_data = data
# For parameters provided as data we use a cubic interpolant
# Note: the cubic interpolant can be differentiated
function = pybamm.Interpolant(
input_data[0],
input_data[-1],
new_children,
name=name,
)
else: # pragma: no cover
raise ValueError(
"Invalid function name length: {0}".format(len(function_name))
)
elif isinstance(function_name, numbers.Number):
# Check not NaN (parameter in csv file but no value given)
if np.isnan(function_name):
raise ValueError(
f"Parameter '{symbol.name}' (possibly a function) not found"
)
# If the "function" is provided is actually a scalar, return a Scalar
# object instead of throwing an error.
function = pybamm.Scalar(function_name, name=symbol.name)
elif callable(function_name):
# otherwise evaluate the function to create a new PyBaMM object
function = function_name(*new_children)
elif isinstance(
function_name, (pybamm.Interpolant, pybamm.InputParameter)
) or (
isinstance(function_name, pybamm.Symbol)
and function_name.size_for_testing == 1
):
function = function_name
else:
raise TypeError(
"Parameter provided for '{}' ".format(symbol.name)
+ "is of the wrong type (should either be scalar-like or callable)"
)
# Differentiate if necessary
if symbol.diff_variable is None:
# Use ones_like so that we get the right shapes
function_out = function * pybamm.ones_like(*new_children)
else:
# return differentiated function
new_diff_variable = self.process_symbol(symbol.diff_variable)
function_out = function.diff(new_diff_variable)
# Process again just to be sure
return self.process_symbol(function_out)
elif isinstance(symbol, pybamm.BinaryOperator):
# process children
new_left = self.process_symbol(symbol.left)
new_right = self.process_symbol(symbol.right)
# make new symbol, ensure domain remains the same
new_symbol = symbol._binary_new_copy(new_left, new_right)
new_symbol.copy_domains(symbol)
return new_symbol
# Unary operators
elif isinstance(symbol, pybamm.UnaryOperator):
new_child = self.process_symbol(symbol.child)
new_symbol = symbol._unary_new_copy(new_child)
# ensure domain remains the same
new_symbol.copy_domains(symbol)
# x_average can sometimes create a new symbol with electrode thickness
# parameters, so we process again to make sure these parameters are set
if isinstance(symbol, pybamm.XAverage) and not isinstance(
new_symbol, pybamm.XAverage
):
new_symbol = self.process_symbol(new_symbol)
# f_a_dist in the size average needs to be processed
if isinstance(new_symbol, pybamm.SizeAverage):
new_symbol.f_a_dist = self.process_symbol(new_symbol.f_a_dist)
return new_symbol
# Functions
elif isinstance(symbol, pybamm.Function):
new_children = [self.process_symbol(child) for child in symbol.children]
return symbol._function_new_copy(new_children)
# Concatenations
elif isinstance(symbol, pybamm.Concatenation):
new_children = [self.process_symbol(child) for child in symbol.children]
return symbol._concatenation_new_copy(new_children)
# Variables: update scale
elif isinstance(symbol, pybamm.Variable):
new_symbol = symbol.create_copy()
new_symbol._scale = self.process_symbol(symbol.scale)
reference = self.process_symbol(symbol.reference)
if isinstance(reference, pybamm.Vector):
# address numpy 1.25 deprecation warning: array should have ndim=0
# before conversion
reference = pybamm.Scalar((reference.evaluate()).item())
new_symbol._reference = reference
new_symbol.bounds = tuple([self.process_symbol(b) for b in symbol.bounds])
return new_symbol
elif isinstance(symbol, numbers.Number):
return pybamm.Scalar(symbol)
else:
# Backup option: return the object
return symbol
def evaluate(self, symbol):
"""
Process and evaluate a symbol.
Parameters
----------
symbol : :class:`pybamm.Symbol`
Symbol or Expression tree to evaluate
Returns
-------
number or array
The evaluated symbol
"""
processed_symbol = self.process_symbol(symbol)
if processed_symbol.is_constant():
return processed_symbol.evaluate()
else:
raise ValueError("symbol must evaluate to a constant scalar or array")
def _ipython_key_completions_(self):
return list(self._dict_items.keys())
def print_parameters(self, parameters, output_file=None):
"""
Return dictionary of evaluated parameters, and optionally print these evaluated
parameters to an output file.
Parameters
----------
parameters : class or dict containing :class:`pybamm.Parameter` objects
Class or dictionary containing all the parameters to be evaluated
output_file : string, optional
The file to print parameters to. If None, the parameters are not printed,
and this function simply acts as a test that all the parameters can be
evaluated, and returns the dictionary of evaluated parameters.
Returns
-------
evaluated_parameters : defaultdict
The evaluated parameters, for further processing if needed
Notes
-----
A C-rate of 1 C is the current required to fully discharge the battery in 1
hour, 2 C is the current required to discharge the battery in 0.5 hours, etc.
"""
# Set list of attributes to ignore, for when we are evaluating parameters from
# a class of parameters
ignore = [
"__name__",
"__doc__",
"__package__",
"__loader__",
"__spec__",
"__file__",
"__cached__",
"__builtins__",
"absolute_import",
"division",
"print_function",
"unicode_literals",
"pybamm",
"_options",
"constants",
"np",
"geo",
"elec",
"therm",
"half_cell",
"x",
"r",
]
# If 'parameters' is a class, extract the dict
if not isinstance(parameters, dict):
parameters_dict = {
k: v for k, v in parameters.__dict__.items() if k not in ignore
}
for domain in ["n", "s", "p"]:
domain_param = getattr(parameters, domain)
parameters_dict.update(
{
f"{domain}.{k}": v
for k, v in domain_param.__dict__.items()
if k not in ignore
}
)
parameters = parameters_dict
evaluated_parameters = defaultdict(list)
# Turn to regular dictionary for faster KeyErrors
self._dict_items = dict(self._dict_items)
for name, symbol in parameters.items():
if isinstance(symbol, pybamm.Symbol):
try:
proc_symbol = self.process_symbol(symbol)
except KeyError:
# skip parameters that don't have a value in that parameter set
proc_symbol = None
if not (
callable(proc_symbol)
or proc_symbol is None
or proc_symbol.has_symbol_of_classes(
(pybamm.Concatenation, pybamm.Broadcast)
)
):
evaluated_parameters[name] = proc_symbol.evaluate(t=0)
# Turn back to FuzzyDict
self._dict_items = pybamm.FuzzyDict(self._dict_items)
# Print the evaluated_parameters dict to output_file
if output_file:
self.print_evaluated_parameters(evaluated_parameters, output_file)
return evaluated_parameters
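# Illustrative call (the parameter class name is an assumption; any class or
# dict holding pybamm.Parameter objects works):
#   values = parameter_values.print_parameters(
#       SomeParameterClass(), output_file="parameters.txt"
#   )
# returns the evaluated parameters and, because output_file is given, also
# writes them to disk via print_evaluated_parameters below.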
def print_evaluated_parameters(self, evaluated_parameters, output_file):
"""
Print a dictionary of evaluated parameters to an output file
Parameters
----------
evaluated_parameters : defaultdict
The evaluated parameters, for further processing if needed
output_file : string, optional
The file to print parameters to. If None, the parameters are not printed,
and this function simply acts as a test that all the parameters can be
evaluated
"""
# Get column width for pretty printing
column_width = max(len(name) for name in evaluated_parameters.keys())
s = "{{:>{}}}".format(column_width)
with open(output_file, "w") as file:
for name, value in sorted(evaluated_parameters.items()):
if 0.001 < abs(value) < 1000:
file.write((s + " : {:10.4g}\n").format(name, value))
else:
file.write((s + " : {:10.3E}\n").format(name, value))
==== /switch_model/balancing/unserved_load.py | repo: switch-model/switch | license: Apache-2.0 (permissive) | Python, 2,283 bytes ====
# Copyright (c) 2015-2022 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines components to allow leaving some load unserved. This module is
specially useful when running production costing simulations, though not
strictly required in all cases.
"""
import os
from pyomo.environ import *
dependencies = (
"switch_model.timescales",
"switch_model.balancing.load_areas",
"switch_model.financials",
)
def define_components(mod):
"""
Augments the model with the capability of leaving some load unserved
at a cost.
unserved_load_penalty[z] is the cost penalty of not supplying 1 MWh of
load in any load zone.
UnservedLoad[z, tp] is a decision variable that describes how much
load (MW) is not supplied in a given load zone, at a given timepoint. This
is applied at distribution nodes if available, otherwise at zone-center
nodes.
UnservedLoadPenalty[tp] is an expression that summarizes the cost penalties
of the load that is left unserved in all load zones at a given timepoint.
"""
mod.unserved_load_penalty = Param(within=NonNegativeReals, default=500)
mod.UnservedLoad = Var(mod.LOAD_ZONES, mod.TIMEPOINTS, within=NonNegativeReals)
try:
mod.Distributed_Power_Injections.append("UnservedLoad")
except AttributeError:
mod.Zone_Power_Injections.append("UnservedLoad")
mod.UnservedLoadPenalty = Expression(
mod.TIMEPOINTS,
rule=lambda m, tp: sum(
m.UnservedLoad[z, tp] * m.unserved_load_penalty for z in m.LOAD_ZONES
),
)
mod.Cost_Components_Per_TP.append("UnservedLoadPenalty")
def load_inputs(mod, switch_data, inputs_dir):
"""
The cost penalty of unserved load in units of $/MWh is the only parameter
that can be specified as an input. The following file is not mandatory,
because the parameter defaults to a value of 500 $/MWh. This file contains
one header row and one data row.
optional input files:
lost_load_cost.csv
unserved_load_penalty
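A minimal example of its contents (the 1000 $/MWh value is illustrative
only):
unserved_load_penalty
1000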
"""
switch_data.load_aug(
filename=os.path.join(inputs_dir, "lost_load_cost.csv"),
optional=True,
param=(mod.unserved_load_penalty,),
)
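# Hedged usage note: in a typical Switch study this module is enabled by
# listing its dotted name alongside the dependencies declared above, e.g.
#   switch_model.timescales
#   switch_model.balancing.load_areas
#   switch_model.financials
#   switch_model.balancing.unserved_load
# after which the UnservedLoad variables and the UnservedLoadPenalty cost
# term defined in define_components() are added to the model.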
==== /elephant/statistics.py | repo: NeuralEnsemble/elephant | license: BSD-3-Clause | Python, 63,884 bytes ====
# -*- coding: utf-8 -*-
"""
Statistical measures of spike trains (e.g., Fano factor) and functions to
estimate firing rates.
Rate estimation
***************
.. autosummary::
:toctree: _toctree/statistics/
mean_firing_rate
instantaneous_rate
time_histogram
optimal_kernel_bandwidth
Spike interval statistics
*************************
.. autosummary::
:toctree: _toctree/statistics/
isi
cv
cv2
lv
lvr
Statistics across spike trains
******************************
.. autosummary::
:toctree: _toctree/statistics/
fanofactor
complexity_pdf
Complexity
Tutorial
========
:doc:`View tutorial <../tutorials/statistics>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/statistics.ipynb
References
----------
.. bibliography::
:keyprefix: statistics-
:copyright: Copyright 2014-2023 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import math
import warnings
import neo
from neo.core.spiketrainlist import SpikeTrainList
import numpy as np
import quantities as pq
import scipy.stats
import scipy.signal
from scipy.special import erf
import elephant.conversion as conv
import elephant.kernels as kernels
from elephant.conversion import BinnedSpikeTrain
from elephant.utils import deprecated_alias, check_neo_consistency, \
is_time_quantity, round_binning_errors
# do not import unicode_literals
# (quantities rescale does not work with unicodes)
__all__ = [
"isi",
"mean_firing_rate",
"fanofactor",
"cv",
"cv2",
"lv",
"lvr",
"instantaneous_rate",
"time_histogram",
"complexity_pdf",
"Complexity",
"fftkernel",
"optimal_kernel_bandwidth"
]
cv = scipy.stats.variation
def isi(spiketrain, axis=-1):
"""
Return an array containing the inter-spike intervals of the spike train.
Accepts a `neo.SpikeTrain`, a `pq.Quantity` array, a `np.ndarray`, or a
list of spike times. If either a `neo.SpikeTrain` or `pq.Quantity` is
provided, the return value will be `pq.Quantity`, otherwise `np.ndarray`.
The units of `pq.Quantity` will be the same as `spiketrain`.
Visualization of this function is covered in Viziphant:
:func:`viziphant.statistics.plot_isi_histogram`.
Parameters
----------
spiketrain : neo.SpikeTrain or pq.Quantity or array-like
The spike times.
axis : int, optional
The axis along which the difference is taken.
Default: the last axis
Returns
-------
intervals : np.ndarray or pq.Quantity
The inter-spike intervals of the `spiketrain`.
Warns
-----
UserWarning
When the input array is not sorted, negative intervals are returned
with a warning.
Examples
--------
>>> from elephant import statistics
>>> statistics.isi([0.3, 4.5, 6.7, 9.3])
array([4.2, 2.2, 2.6])
"""
if isinstance(spiketrain, neo.SpikeTrain):
intervals = np.diff(spiketrain.magnitude, axis=axis)
# np.diff makes a copy
intervals = pq.Quantity(intervals, units=spiketrain.units, copy=False)
else:
intervals = np.diff(spiketrain, axis=axis)
if (intervals < 0).any():
warnings.warn("ISI evaluated to negative values. "
"Please sort the input array.")
return intervals
def mean_firing_rate(spiketrain, t_start=None, t_stop=None, axis=None):
"""
Return the firing rate of the spike train.
The firing rate is calculated as the number of spikes in the spike train
in the range `[t_start, t_stop]` divided by the time interval
`t_stop - t_start`. See the description below for cases when `t_start` or
`t_stop` is None.
Accepts a `neo.SpikeTrain`, a `pq.Quantity` array, or a plain
`np.ndarray`. If either a `neo.SpikeTrain` or `pq.Quantity` array is
provided, the return value will be a `pq.Quantity` array, otherwise a
plain `np.ndarray`. The units of the `pq.Quantity` array will be the
inverse of the `spiketrain`.
Parameters
----------
spiketrain : neo.SpikeTrain or pq.Quantity or np.ndarray
The spike times.
t_start : float or pq.Quantity, optional
The start time to use for the interval.
If None, retrieved from the `t_start` attribute of `spiketrain`. If
that is not present, default to 0. All spiketrain's spike times below
this value are ignored.
Default: None
t_stop : float or pq.Quantity, optional
The stop time to use for the time points.
If not specified, retrieved from the `t_stop` attribute of
`spiketrain`. If that is not present, default to the maximum value of
`spiketrain`. All spiketrain's spike times above this value are
ignored.
Default: None
axis : int, optional
The axis over which to do the calculation; has no effect when the
input is a neo.SpikeTrain, because a neo.SpikeTrain is always a 1-d
vector. If None, do the calculation over the flattened array.
Default: None
Returns
-------
float or pq.Quantity or np.ndarray
The firing rate of the `spiketrain`
Raises
------
TypeError
If the input spiketrain is a `np.ndarray` but `t_start` or `t_stop` is
`pq.Quantity`.
If the input spiketrain is a `neo.SpikeTrain` or `pq.Quantity` but
`t_start` or `t_stop` is not `pq.Quantity`.
ValueError
If the input spiketrain is empty.
Examples
--------
>>> from elephant import statistics
>>> statistics.mean_firing_rate([0.3, 4.5, 6.7, 9.3])
0.4301075268817204
"""
if isinstance(spiketrain, neo.SpikeTrain) and t_start is None \
and t_stop is None and axis is None:
# a faster approach for a typical use case
n_spikes = len(spiketrain)
time_interval = spiketrain.t_stop - spiketrain.t_start
time_interval = time_interval.rescale(spiketrain.units)
rate = n_spikes / time_interval
return rate
if isinstance(spiketrain, pq.Quantity):
# Quantity or neo.SpikeTrain
if not is_time_quantity(t_start, allow_none=True):
raise TypeError("'t_start' must be a Quantity or None")
if not is_time_quantity(t_stop, allow_none=True):
raise TypeError("'t_stop' must be a Quantity or None")
units = spiketrain.units
if t_start is None:
t_start = getattr(spiketrain, 't_start', 0 * units)
t_start = t_start.rescale(units).magnitude
if t_stop is None:
t_stop = getattr(spiketrain, 't_stop',
np.max(spiketrain, axis=axis))
t_stop = t_stop.rescale(units).magnitude
# calculate as a numpy array
rates = mean_firing_rate(spiketrain.magnitude, t_start=t_start,
t_stop=t_stop, axis=axis)
rates = pq.Quantity(rates, units=1. / units)
elif isinstance(spiketrain, (np.ndarray, list, tuple)):
if isinstance(t_start, pq.Quantity) or isinstance(t_stop, pq.Quantity):
raise TypeError("'t_start' and 't_stop' cannot be quantities if "
"'spiketrain' is not a Quantity.")
spiketrain = np.asarray(spiketrain)
if len(spiketrain) == 0:
raise ValueError("Empty input spiketrain.")
if t_start is None:
t_start = 0
if t_stop is None:
t_stop = np.max(spiketrain, axis=axis)
time_interval = t_stop - t_start
if axis and isinstance(t_stop, np.ndarray):
t_stop = np.expand_dims(t_stop, axis)
rates = np.sum((spiketrain >= t_start) & (spiketrain <= t_stop),
axis=axis) / time_interval
else:
raise TypeError("Invalid input spiketrain type: '{}'. Allowed: "
"neo.SpikeTrain, Quantity, ndarray".
format(type(spiketrain)))
return rates
def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms):
r"""
Evaluates the empirical Fano factor F of the spike counts of
a list of `neo.SpikeTrain` objects.
Given the vector v containing the observed spike counts (one per
spike train) in the time window [t0, t1], F is defined as:
.. math::
F := \frac{var(v)}{mean(v)}
The Fano factor is typically computed for spike trains representing the
activity of the same neuron over different trials. The higher F, the
larger the cross-trial non-stationarity. In theory for a time-stationary
Poisson process, F=1.
Parameters
----------
spiketrains : list
List of `neo.SpikeTrain` or `pq.Quantity` or `np.ndarray` or list of
spike times for which to compute the Fano factor of spike counts.
warn_tolerance : pq.Quantity
In case of a list of input neo.SpikeTrains, if their durations vary by
more than `warn_tolerance` in their absolute values, a warning is issued
(see Notes).
Default: 0.1 ms
Returns
-------
fano : float
The Fano factor of the spike counts of the input spike trains.
Returns np.NaN if an empty list is specified, or if all spike trains
are empty.
Raises
------
TypeError
If the input spiketrains are neo.SpikeTrain objects, but
`warn_tolerance` is not a quantity.
Notes
-----
The check for the equal duration of the input spike trains is performed
only if the input is of type `neo.SpikeTrain`: if you pass numpy arrays,
please make sure manually that they all have the same duration.
Examples
--------
>>> import neo
>>> from elephant import statistics
>>> spiketrains = [
... neo.SpikeTrain([0.3, 4.5, 6.7, 9.3], t_stop=10, units='s'),
... neo.SpikeTrain([1.4, 3.3, 8.2], t_stop=10, units='s')
... ]
>>> statistics.fanofactor(spiketrains)
0.07142857142857142
"""
# Build array of spike counts (one per spike train)
spike_counts = np.array([len(st) for st in spiketrains])
# Compute FF
if all(count == 0 for count in spike_counts):
# empty list of spiketrains reaches this branch, and NaN is returned
return np.nan
if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):
if not is_time_quantity(warn_tolerance):
raise TypeError("'warn_tolerance' must be a time quantity.")
durations = [(st.t_stop - st.t_start).simplified.item()
for st in spiketrains]
durations_min = min(durations)
durations_max = max(durations)
if durations_max - durations_min > warn_tolerance.simplified.item():
warnings.warn("Fano factor calculated for spike trains of "
"different duration (minimum: {_min}s, maximum "
"{_max}s).".format(_min=durations_min,
_max=durations_max))
fano = spike_counts.var() / spike_counts.mean()
return fano
def __variation_check(v, with_nan):
# ensure the input is a vector
if v.ndim != 1:
raise ValueError("The input must be a vector, not a {}-dim matrix.".
format(v.ndim))
# ensure we have enough entries
if v.size < 2:
if with_nan:
warnings.warn("The input size is too small. Please provide"
"an input with more than 1 entry. Returning `NaN`"
"since the argument `with_nan` is `True`")
return np.NaN
raise ValueError("Input size is too small. Please provide "
"an input with more than 1 entry. Set 'with_nan' "
"to True to replace the error by a warning.")
return None
@deprecated_alias(v='time_intervals')
def cv2(time_intervals, with_nan=False):
r"""
Calculate the measure of Cv2 for a sequence of time intervals between
events :cite:`statistics-Holt1996_1806`.
Given a vector :math:`I` containing a sequence of intervals, the Cv2 is
defined as:
.. math::
Cv2 := \frac{1}{N} \sum_{i=1}^{N-1}
\frac{2|I_{i+1}-I_i|}
{|I_{i+1}+I_i|}
The Cv2 is typically computed as a substitute for the classical
coefficient of variation (Cv) for sequences of events which include some
(relatively slow) rate fluctuation. As with the Cv, Cv2=1 for a sequence
of intervals generated by a Poisson process.
Parameters
----------
time_intervals : pq.Quantity or np.ndarray or list
Vector of consecutive time intervals.
with_nan : bool, optional
If True, `cv2` of a spike train with less than two spikes results in a
np.NaN value and a warning is raised.
If False, a `ValueError` exception is raised for a spike train with
less than two spikes.
Default: False
Returns
-------
float
The Cv2 of the inter-spike interval of the input sequence.
Raises
------
ValueError
If an empty list is specified, or if the sequence has less than two
entries and `with_nan` is False.
If a matrix is passed to the function. Only vector inputs are
supported.
Warns
-----
UserWarning
If `with_nan` is True and `cv2` is calculated for a sequence with less
than two entries, generating a np.NaN.
Examples
--------
>>> from elephant import statistics
>>> statistics.cv2([0.3, 4.5, 6.7, 9.3])
0.8226190476190478
"""
# convert to array, cast to float
time_intervals = np.asarray(time_intervals)
np_nan = __variation_check(time_intervals, with_nan)
if np_nan is not None:
return np_nan
# calculate Cv2 and return result
cv_i = np.diff(time_intervals) / (time_intervals[:-1] + time_intervals[1:])
return 2. * np.mean(np.abs(cv_i))
@deprecated_alias(v='time_intervals')
def lv(time_intervals, with_nan=False):
r"""
Calculate the measure of local variation Lv for a sequence of time
intervals between events :cite:`statistics-Shinomoto2003_2823`.
Given a vector :math:`I` containing a sequence of intervals, the Lv is
defined as:
.. math::
Lv := \frac{1}{N} \sum_{i=1}^{N-1}
\frac{3(I_i-I_{i+1})^2}
{(I_i+I_{i+1})^2}
The Lv is typically computed as a substitute for the classical coefficient
of variation for sequences of events which include some (relatively slow)
rate fluctuation. As with the Cv, Lv=1 for a sequence of intervals
generated by a Poisson process.
Parameters
----------
time_intervals : pq.Quantity or np.ndarray or list
Vector of consecutive time intervals.
with_nan : bool, optional
If True, the Lv of a spike train with less than two spikes results in a
`np.NaN` value and a warning is raised.
If False, a `ValueError` exception is raised with a spike train with
less than two spikes.
Default: False
Returns
-------
float
The Lv of the inter-spike interval of the input sequence.
Raises
------
ValueError
If an empty list is specified, or if the sequence has less than two
entries and `with_nan` is False.
If a matrix is passed to the function. Only vector inputs are
supported.
Warns
-----
UserWarning
If `with_nan` is True and the Lv is calculated for a spike train
with less than two spikes, generating a np.NaN.
Examples
--------
>>> from elephant import statistics
>>> statistics.lv([0.3, 4.5, 6.7, 9.3])
0.8306154336734695
"""
# convert to array, cast to float
time_intervals = np.asarray(time_intervals)
np_nan = __variation_check(time_intervals, with_nan)
if np_nan is not None:
return np_nan
cv_i = np.diff(time_intervals) / (time_intervals[:-1] + time_intervals[1:])
return 3. * np.mean(np.power(cv_i, 2))
def lvr(time_intervals, R=5*pq.ms, with_nan=False):
r"""
Calculate the measure of revised local variation LvR for a sequence of time
intervals between events :cite:`statistics-Shinomoto2009_e1000433`.
Given a vector :math:`I` containing a sequence of intervals, the LvR is
defined as:
.. math::
LvR := \frac{3}{N-1} \sum_{i=1}^{N-1}
\left(1-\frac{4 I_i I_{i+1}}
{(I_i+I_{i+1})^2}\right)
\left(1+\frac{4 R}{I_i+I_{i+1}}\right)
The LvR is a revised version of the Lv, with enhanced invariance to firing
rate fluctuations by introducing a refractoriness constant R. The LvR with
`R=5ms` was shown to outperform other ISI variability measures in spike
trains with firing rate fluctuations and sensory stimuli
:cite:`statistics-Shinomoto2009_e1000433`.
Parameters
----------
time_intervals : pq.Quantity or np.ndarray or list
Vector of consecutive time intervals. Must have time units; if no unit
is passed, `ms` is assumed.
R : pq.Quantity or int or float
Refractoriness constant (R >= 0). If no quantity is passed, `ms` is
assumed.
Default: 5 ms
with_nan : bool, optional
If True, LvR of a spike train with less than two spikes results in a
np.NaN value and a warning is raised.
If False, a `ValueError` exception is raised with a spike train with
less than two spikes.
Default: False
Returns
-------
float
The LvR of the inter-spike interval of the input sequence.
Raises
------
ValueError
If an empty list is specified, or if the sequence has less than two
entries and `with_nan` is False.
If a matrix is passed to the function. Only vector inputs are
supported.
Warns
-----
UserWarning
If `with_nan` is True and the `lvr` is calculated for a spike train
with less than two spikes, generating a np.NaN.
If R is passed without any units attached, milliseconds (ms) are assumed.
Examples
--------
>>> from elephant import statistics
>>> statistics.lvr([0.3, 4.5, 6.7, 9.3], R=0.005)
0.833907445980624
"""
if isinstance(R, pq.Quantity):
R = R.rescale('ms').magnitude
else:
warnings.warn('No units specified for R, assuming milliseconds (ms)')
if R < 0:
raise ValueError('R must be >= 0')
# check units of intervals if available
if isinstance(time_intervals, pq.Quantity):
time_intervals = time_intervals.rescale('ms').magnitude
else:
warnings.warn('No units specified for time_intervals,'
' assuming milliseconds (ms)')
# convert to array, cast to float
time_intervals = np.asarray(time_intervals)
np_nan = __variation_check(time_intervals, with_nan)
if np_nan is not None:
return np_nan
N = len(time_intervals)
t = time_intervals[:-1] + time_intervals[1:]
frac1 = 4 * time_intervals[:-1] * time_intervals[1:] / t**2
frac2 = 4 * R / t
lvr = (3 / (N-1)) * np.sum((1-frac1) * (1+frac2))
return lvr
@deprecated_alias(spiketrain='spiketrains')
def instantaneous_rate(spiketrains, sampling_period, kernel='auto',
cutoff=5.0, t_start=None, t_stop=None, trim=False,
center_kernel=True, border_correction=False):
r"""
Estimates instantaneous firing rate by kernel convolution.
Visualization of this function is covered in Viziphant:
:func:`viziphant.statistics.plot_instantaneous_rates_colormesh`.
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
Neo object(s) that contains spike times, the unit of the time stamps,
and `t_start` and `t_stop` of the spike train.
sampling_period : pq.Quantity
Time stamp resolution of the spike times. The same resolution will
be assumed for the kernel.
kernel : 'auto' or Kernel, optional
The string 'auto' or callable object of class `kernels.Kernel`.
The kernel is used for convolution with the spike train and its
standard deviation determines the time resolution of the instantaneous
rate estimation. Currently, implemented kernel forms are rectangular,
triangular, epanechnikovlike, gaussian, laplacian, exponential, and
alpha function.
If 'auto', the optimized kernel width for the rate estimation is
calculated according to :cite:`statistics-Shimazaki2010_171` and a
Gaussian kernel is constructed with this width. Automatized calculation
of the kernel width is not available for other than Gaussian kernel
shapes.
Note: The kernel width is not adaptive, i.e., it is calculated as
global optimum across the data.
Default: 'auto'
cutoff : float, optional
This factor determines the cutoff of the probability distribution of
the kernel, i.e., the considered width of the kernel in terms of
multiples of the standard deviation sigma.
Default: 5.0
t_start : pq.Quantity, optional
Start time of the interval used to compute the firing rate.
If None, `t_start` is assumed equal to `t_start` attribute of
`spiketrain`.
Default: None
t_stop : pq.Quantity, optional
End time of the interval used to compute the firing rate.
If None, `t_stop` is assumed equal to `t_stop` attribute of
`spiketrain`.
Default: None
trim : bool, optional
Accounts for the asymmetry of a kernel.
If False, the output of the Fast Fourier Transformation being a longer
vector than the input vector (output = input + kernel - 1) is reduced
back to the original size of the considered time interval of the
`spiketrain` using the median of the kernel. False (no trimming) is
equivalent to 'same' convolution mode for symmetrical kernels.
If True, only the region of the convolved signal is returned, where
there is complete overlap between kernel and spike train. This is
achieved by reducing the length of the output of the Fast Fourier
Transformation by a total of two times the size of the kernel, and
`t_start` and `t_stop` are adjusted. True (trimming) is equivalent to
'valid' convolution mode for symmetrical kernels.
Default: False
center_kernel : bool, optional
If set to True, the kernel will be translated such that its median is
centered on the spike, thus putting equal weight before and after the
spike. If False, no adjustment is performed such that the spike sits at
the origin of the kernel.
Default: True
border_correction : bool, optional
Apply a border correction to prevent underestimating the firing rates
at the borders of the spike trains, i.e., close to t_start and t_stop.
The correction is done by estimating the mass of the kernel outside
these spike train borders under the assumption that the rate does not
change strongly.
Only possible in the case of a Gaussian kernel.
Default: False
Returns
-------
rate : neo.AnalogSignal
2D matrix that contains the rate estimation in unit hertz (Hz) of shape
``(time, len(spiketrains))`` or ``(time, 1)`` in case of a single
input spiketrain. `rate.times` contains the time axis of the rate
estimate: the unit of this property is the same as the resolution that
is given via the argument `sampling_period` to the function.
Raises
------
TypeError
* If `spiketrain` is not an instance of `neo.SpikeTrain`.
* If `sampling_period` is not a `pq.Quantity`.
* If `sampling_period` is not larger than zero.
* If `kernel` is neither instance of `kernels.Kernel` nor string
'auto'.
* If `cutoff` is neither `float` nor `int`.
* If `t_start` and `t_stop` are neither None nor a `pq.Quantity`.
* If `trim` is not `bool`.
ValueError
* If `sampling_period` is smaller than zero.
* If `kernel` is 'auto' and the function was unable to calculate
optimal kernel width for instantaneous rate from input data.
Warns
-----
UserWarning
* If `cutoff` is less than `min_cutoff` attribute of `kernel`, the
width of the kernel is adjusted to a minimally allowed width.
Notes
-----
* The resulting instantaneous firing rate values smaller than ``0``, which
may happen due to machine precision errors, are clipped to zero.
* The instantaneous firing rate estimate is calculated based on half-open
intervals ``[)``, except the last one e.g. if ``t_start = 0s``,
``t_stop = 4s`` and ``sampling_period = 1s``, the intervals are:
``[0, 1)`` ``[1, 2)`` ``[2, 3)`` ``[3, 4]``.
This induces a sampling bias, which can lead to a time shift of the
estimated rate, if the `sampling_period` is chosen large relative to the
duration ``(t_stop - t_start)``. One possibility to counteract this is
to choose a smaller `sampling_period`.
* The last interval of the given duration ``(t_stop - t_start)`` is
dropped if it is shorter than `sampling_period`,
e.g. if ``t_start = 0s``, ``t_stop = 4.5s`` and
``sampling_period = 1s``, the intervals considered are:
``[0, 1)`` ``[1, 2)`` ``[2, 3)`` ``[3, 4]``,
the last interval ``[4, 4.5]`` is excluded from all calculations.
Examples
--------
Example 1. Automatic kernel estimation.
>>> import neo
>>> import quantities as pq
>>> from elephant import statistics
>>> spiketrain = neo.SpikeTrain([0.3, 4.5, 6.7, 9.3], t_stop=10, units='s')
>>> rate = statistics.instantaneous_rate(spiketrain,
... sampling_period=10 * pq.ms,
... kernel='auto')
>>> rate.annotations['kernel']
{'type': 'GaussianKernel', 'sigma': '7.273225922958104 s', 'invert': False}
>>> print(rate.sampling_rate)
0.1 1/ms
Example 2. Manually set kernel.
>>> from elephant import kernels
>>> spiketrain = neo.SpikeTrain([0], t_stop=1, units='s')
>>> kernel = kernels.GaussianKernel(sigma=300 * pq.ms)
>>> rate = statistics.instantaneous_rate(spiketrain,
... sampling_period=200 * pq.ms, kernel=kernel, t_start=-1 * pq.s)
>>> rate
<AnalogSignal(array([[0.01007419],
[0.05842767],
[0.22928759],
[0.60883028],
[1.0938699 ],
[1.3298076 ],
[1.0938699 ],
[0.60883028],
[0.22928759],
[0.05842767]]) * Hz, [-1.0 s, 1.0 s], sampling rate: 0.005 1/ms)>
>>> rate.magnitude
array([[0.01007419],
[0.05842767],
[0.22928759],
[0.60883028],
[1.0938699 ],
[1.3298076 ],
[1.0938699 ],
[0.60883028],
[0.22928759],
[0.05842767]])
"""
def optimal_kernel(st):
width_sigma = None
if len(st) > 0:
width_sigma = optimal_kernel_bandwidth(
st.magnitude, times=None, bootstrap=False)['optw']
if width_sigma is None:
raise ValueError("Unable to calculate optimal kernel width for "
"instantaneous rate from input data.")
return kernels.GaussianKernel(width_sigma * st.units)
if border_correction and not \
(kernel == 'auto' or isinstance(kernel, kernels.GaussianKernel)):
raise ValueError(
'The border correction is only implemented'
' for Gaussian kernels.')
if isinstance(spiketrains, neo.SpikeTrain):
if kernel == 'auto':
kernel = optimal_kernel(spiketrains)
spiketrains = [spiketrains]
if not all([isinstance(elem, neo.SpikeTrain) for elem in spiketrains]):
raise TypeError(f"'spiketrains' must be a list of neo.SpikeTrain's or "
f"a single neo.SpikeTrain. Found: {type(spiketrains)}")
if not is_time_quantity(sampling_period):
raise TypeError(f"The 'sampling_period' must be a time Quantity."
f"Found: {type(sampling_period)}")
if sampling_period.magnitude < 0:
raise ValueError(f"The 'sampling_period' ({sampling_period}) "
f"must be non-negative.")
if not (isinstance(kernel, kernels.Kernel) or kernel == 'auto'):
raise TypeError(f"'kernel' must be instance of class "
f"elephant.kernels.Kernel or string 'auto'. Found: "
f"{type(kernel)}, value {str(kernel)}")
if not isinstance(cutoff, (float, int)):
raise TypeError("'cutoff' must be float or integer")
if not is_time_quantity(t_start, allow_none=True):
raise TypeError("'t_start' must be a time Quantity")
if not is_time_quantity(t_stop, allow_none=True):
raise TypeError("'t_stop' must be a time Quantity")
if not isinstance(trim, bool):
raise TypeError("'trim' must be bool")
check_neo_consistency(spiketrains,
object_type=neo.SpikeTrain,
t_start=t_start, t_stop=t_stop)
if kernel == 'auto':
if len(spiketrains) == 1:
kernel = optimal_kernel(spiketrains[0])
else:
raise ValueError("Cannot estimate a kernel for a list of spike "
"trains. Please provide a kernel explicitly "
"rather than 'auto'.")
if t_start is None:
t_start = spiketrains[0].t_start
if t_stop is None:
t_stop = spiketrains[0].t_stop
# Rescale units for consistent calculation
t_start = t_start.rescale(spiketrains[0].units)
t_stop = t_stop.rescale(spiketrains[0].units)
# Calculate parameters for np.histogram
n_bins = int(((t_stop - t_start) / sampling_period).simplified)
hist_range_end = t_start + n_bins * \
sampling_period.rescale(spiketrains[0].units)
hist_range = (t_start.item(), hist_range_end.item())
# Preallocation
histogram_arr = np.zeros((len(spiketrains), n_bins), dtype=np.float64)
for i, st in enumerate(spiketrains):
histogram_arr[i], _ = np.histogram(st.magnitude, bins=n_bins,
range=hist_range)
histogram_arr = histogram_arr.T # make it (time, units)
# Kernel
if cutoff < kernel.min_cutoff:
cutoff = kernel.min_cutoff
warnings.warn("The width of the kernel was adjusted to a minimally "
"allowed width.")
scaling_unit = pq.CompoundUnit(f"{sampling_period.rescale('s').item()}*s")
cutoff_sigma = cutoff * kernel.sigma.rescale(scaling_unit).magnitude
if center_kernel: # t_arr is centered on the kernel median.
median = kernel.icdf(0.5).rescale(scaling_unit).item()
else:
median = 0
# An odd number of points correctly resolves the median index of the
# kernel. This avoids a timeshift in the rate estimate for symmetric
# kernels. A number x given by 'x = 2 * n + 1' with n being an integer is
# always odd. Using `math.ceil` to calculate `t_arr_kernel_half` ensures an
# integer value, hence the number of points for the kernel (num) given by
# `num=2 * t_arr_kernel_half + 1` is always odd.
# (See Issue #360, https://github.com/NeuralEnsemble/elephant/issues/360)
t_arr_kernel_half = math.ceil(
cutoff * (kernel.sigma / sampling_period).simplified.item())
t_arr_kernel_length = 2 * t_arr_kernel_half + 1
# Shift kernel using the calculated median
t_arr_kernel = np.linspace(start=-cutoff_sigma + median,
stop=cutoff_sigma + median,
num=t_arr_kernel_length,
endpoint=True) * scaling_unit
# Calculate the kernel values with t_arr
kernel_arr = np.expand_dims(
kernel(t_arr_kernel).rescale(pq.Hz).magnitude, axis=1)
# Define mode for scipy.signal.fftconvolve
if trim:
fft_mode = 'valid'
else:
fft_mode = 'same'
rate = scipy.signal.fftconvolve(histogram_arr,
kernel_arr,
mode=fft_mode)
# The convolution of non-negative vectors is non-negative
rate = np.clip(rate, a_min=0, a_max=None, out=rate)
# Adjust t_start and t_stop
if fft_mode == 'valid':
median_id = kernel.median_index(t_arr_kernel)
kernel_array_size = len(kernel_arr)
t_start = t_start + median_id * scaling_unit
t_stop = t_stop - (kernel_array_size - median_id) * scaling_unit
kernel_annotation = dict(type=type(kernel).__name__,
sigma=str(kernel.sigma),
invert=kernel.invert)
rate = neo.AnalogSignal(signal=rate,
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start, t_stop=t_stop,
kernel=kernel_annotation)
if border_correction:
sigma = kernel.sigma.simplified.magnitude
times = rate.times.simplified.magnitude
correction_factor = 2 / (
erf((t_stop.simplified.magnitude - times) / (
np.sqrt(2.) * sigma))
- erf((t_start.simplified.magnitude - times) / (
np.sqrt(2.) * sigma)))
rate *= correction_factor[:, None]
duration = t_stop.simplified.magnitude - t_start.simplified.magnitude
# ensure the integral over the firing rate yields the exact number of spikes
for i, spiketrain in enumerate(spiketrains):
if len(spiketrain) > 0:
rate[:, i] *= len(spiketrain) /\
(np.mean(rate[:, i]).magnitude * duration)
return rate
@deprecated_alias(binsize='bin_size')
def time_histogram(spiketrains, bin_size, t_start=None, t_stop=None,
output='counts', binary=False):
"""
Time Histogram of a list of `neo.SpikeTrain` objects.
Visualization of this function is covered in Viziphant:
:func:`viziphant.statistics.plot_time_histogram`.
Parameters
----------
spiketrains : list of neo.SpikeTrain
`neo.SpikeTrain`s with a common time axis (same `t_start` and `t_stop`)
bin_size : pq.Quantity
Width of the histogram's time bins.
t_start : pq.Quantity, optional
Start time of the histogram. Only events in `spiketrains` falling
between `t_start` and `t_stop` (both included) are considered in the
histogram.
If None, the maximum `t_start` of all `neo.SpikeTrain`s is used as
`t_start`.
Default: None
t_stop : pq.Quantity, optional
Stop time of the histogram. Only events in `spiketrains` falling
between `t_start` and `t_stop` (both included) are considered in the
histogram.
If None, the minimum `t_stop` of all `neo.SpikeTrain`s is used as
`t_stop`.
Default: None
output : {'counts', 'mean', 'rate'}, optional
Normalization of the histogram. Can be one of:
* 'counts': spike counts at each bin (as integer numbers).
* 'mean': mean spike counts per spike train.
* 'rate': mean spike rate per spike train. Like 'mean', but the
counts are additionally normalized by the bin width.
Default: 'counts'
binary : bool, optional
If True, indicates whether all `neo.SpikeTrain` objects should first
be binned to a binary representation (using the
`conversion.BinnedSpikeTrain` class) and the calculation of the
histogram is based on this representation.
Note that the output is not binary, but a histogram of the converted,
binary representation.
Default: False
Returns
-------
neo.AnalogSignal
A `neo.AnalogSignal` object containing the histogram values.
`neo.AnalogSignal[j]` is the histogram computed between
`t_start + j * bin_size` and `t_start + (j + 1) * bin_size`.
Raises
------
ValueError
If `output` is not 'counts', 'mean' or 'rate'.
Warns
-----
UserWarning
If `t_start` is None and the objects in `spiketrains` have different
`t_start` values.
If `t_stop` is None and the objects in `spiketrains` have different
`t_stop` values.
See also
--------
elephant.conversion.BinnedSpikeTrain
Examples
--------
>>> import neo
>>> import quantities as pq
>>> from elephant import statistics
>>> spiketrains = [
... neo.SpikeTrain([0.3, 4.5, 6.7, 9.3], t_stop=10, units='s'),
... neo.SpikeTrain([0.7, 4.3, 8.2], t_stop=10, units='s')
... ]
>>> hist = statistics.time_histogram(spiketrains, bin_size=1 * pq.s)
>>> hist
<AnalogSignal(array([[2],
[0],
[0],
[0],
[2],
[0],
[1],
[0],
[1],
[1]]) * dimensionless, [0.0 s, 10.0 s], sampling rate: 1.0 1/s)>
>>> hist.magnitude.flatten()
array([2, 0, 0, 0, 2, 0, 1, 0, 1, 1])
"""
# Bin the spike trains and sum across columns
bs = BinnedSpikeTrain(spiketrains, t_start=t_start, t_stop=t_stop,
bin_size=bin_size)
if binary:
bs = bs.binarize(copy=False)
bin_hist = bs.get_num_of_spikes(axis=0)
# Flatten array
bin_hist = np.ravel(bin_hist)
# Renormalise the histogram
if output == 'counts':
# Raw
bin_hist = pq.Quantity(bin_hist, units=pq.dimensionless, copy=False)
elif output == 'mean':
# Divide by number of input spike trains
bin_hist = pq.Quantity(bin_hist / len(spiketrains),
units=pq.dimensionless, copy=False)
elif output == 'rate':
# Divide by number of input spike trains and bin width
bin_hist = bin_hist / (len(spiketrains) * bin_size)
else:
raise ValueError(f'Parameter output ({output}) is not valid.')
return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1),
sampling_period=bin_size, units=bin_hist.units,
t_start=bs.t_start, normalization=output,
copy=False)
@deprecated_alias(binsize='bin_size')
def complexity_pdf(spiketrains, bin_size):
"""
Complexity Distribution of a list of `neo.SpikeTrain` objects
:cite:`statistics-Gruen2007_96`.
Deprecated in favor of :meth:`Complexity.pdf`.
Probability density computed from the complexity histogram which is the
histogram of the entries of the population histogram of clipped (binary)
spike trains computed with a bin width of `bin_size`.
It provides for each complexity (== number of active neurons per bin) the
number of occurrences. The normalization of that histogram to 1 is the
probability density.
Parameters
----------
spiketrains : list of neo.SpikeTrain
Spike trains with a common time axis (same `t_start` and `t_stop`)
bin_size : pq.Quantity
Width of the histogram's time bins.
Returns
-------
complexity_distribution : neo.AnalogSignal
A `neo.AnalogSignal` object containing the histogram values.
`neo.AnalogSignal[j]` is the histogram computed between
`t_start + j * bin_size` and `t_start + (j + 1) * bin_size`.
See also
--------
elephant.conversion.BinnedSpikeTrain
"""
warnings.warn("'complexity_pdf' is deprecated in favor of the Complexity "
"class which has a 'pdf' method", DeprecationWarning)
complexity = Complexity(spiketrains, bin_size=bin_size)
return complexity.pdf()
class Complexity(object):
"""
Class for complexity distribution (i.e. number of synchronous spikes found)
:cite:`statistics-Gruen2007_96` of a list of `neo.SpikeTrain` objects.
Complexity is calculated by counting the number of spikes (i.e. non-empty
bins) that occur separated by `spread - 1` or less empty bins, within and
across spike trains in the `spiketrains` list.
Implementation (without spread) is based on the paper cited above.
Parameters
----------
spiketrains : list of neo.SpikeTrain
Spike trains with a common time axis (same `t_start` and `t_stop`)
sampling_rate : pq.Quantity or None, optional
Sampling rate of the spike trains with units of 1/time.
Used to shift the epoch edges in order to avoid rounding errors.
If None using the epoch to slice spike trains may introduce
rounding errors.
Default: None
bin_size : pq.Quantity or None, optional
Width of the histogram's time bins with units of time.
The user must specify the `bin_size` or the `sampling_rate`.
* If None and the `sampling_rate` is available
1/`sampling_rate` is used.
* If both are given then `bin_size` is used.
Default: None
binary : bool, optional
* If True then the time histograms will only count the number of
neurons which spike in each bin.
* If False the total number of spikes per bin is counted in the
time histogram.
Default: True
spread : int, optional
Number of bins in which to check for synchronous spikes.
Spikes that occur separated by `spread - 1` or less empty bins are
considered synchronous.
* ``spread = 0`` corresponds to a bincount across spike trains.
* ``spread = 1`` corresponds to counting consecutive spikes.
* ``spread = 2`` corresponds to counting consecutive spikes and
spikes separated by exactly 1 empty bin.
* ``spread = n`` corresponds to counting spikes separated by at most
`n - 1` empty bins.
Default: 0
tolerance : float or None, optional
Tolerance for rounding errors in the binning process and in the input
data.
If None possible binning errors are not accounted for.
Default: 1e-8
Attributes
----------
epoch : neo.Epoch
An epoch object containing complexity values, left edges and durations
of all intervals with at least one spike.
* ``epoch.array_annotations['complexity']`` contains the
complexity values per spike.
* ``epoch.times`` contains the left edges.
* ``epoch.durations`` contains the durations.
time_histogram : neo.AnalogSignal
A `neo.AnalogSignal` object containing the histogram values.
`neo.AnalogSignal[j]` is the histogram computed between
`t_start + j * binsize` and `t_start + (j + 1) * binsize`.
* If ``binary = True`` : Number of neurons that spiked in each bin,
regardless of the number of spikes.
* If ``binary = False`` : Number of neurons and spikes per neurons
in each bin.
complexity_histogram : np.ndarray
The number of occurrences of events of different complexities.
`complexity_hist[i]` corresponds to the number of events of
complexity `i` for `i > 0`.
Raises
------
ValueError
When `t_stop` is smaller than `t_start`.
When both `sampling_rate` and `bin_size` are not specified.
When `spread` is not a positive integer.
When `spiketrains` is an empty list.
When `t_start` is not the same for all spiketrains
When `t_stop` is not the same for all spiketrains
TypeError
When `spiketrains` is not a list.
When the elements in `spiketrains` are not instances of neo.SpikeTrain
Warns
-----
UserWarning
If no sampling rate is supplied which may lead to rounding errors
when using the epoch to slice spike trains.
Notes
-----
Note that with most common parameter combinations spike times can end up
on bin edges. This makes the binning susceptible to rounding errors which
is accounted for by moving spikes which are within tolerance of the next
bin edge into the following bin. This can be adjusted using the tolerance
parameter and turned off by setting `tolerance=None`.
See also
--------
elephant.conversion.BinnedSpikeTrain
elephant.spike_train_synchrony.Synchrotool
Examples
--------
>>> import neo
>>> import quantities as pq
>>> from elephant.statistics import Complexity
>>> sampling_rate = 1/pq.ms
>>> st1 = neo.SpikeTrain([1, 4, 6] * pq.ms, t_stop=10.0 * pq.ms)
>>> st2 = neo.SpikeTrain([1, 5, 8] * pq.ms, t_stop=10.0 * pq.ms)
>>> sts = [st1, st2]
>>> # spread = 0, a simple bincount
>>> cpx = Complexity(sts, sampling_rate=sampling_rate)
Complexity calculated at sampling rate precision
>>> print(cpx.complexity_histogram)
[5 4 1]
>>> print(cpx.time_histogram.flatten())
[0 2 0 0 1 1 1 0 1 0] dimensionless
>>> print(cpx.time_histogram.times)
[0. 1. 2. 3. 4. 5. 6. 7. 8. 9.] ms
>>> # spread = 1, consecutive spikes
>>> cpx = Complexity(sts, sampling_rate=sampling_rate, spread=1)
Complexity calculated at sampling rate precision
>>> print(cpx.complexity_histogram) # doctest: +SKIP
[5 4 1]
>>> print(cpx.time_histogram.flatten())
[0 2 0 0 3 3 3 0 1 0] dimensionless
>>> # spread = 2, consecutive spikes and separated by 1 empty bin
>>> cpx = Complexity(sts, sampling_rate=sampling_rate, spread=2)
Complexity calculated at sampling rate precision
>>> print(cpx.complexity_histogram)
[4 0 1 0 1]
>>> print(cpx.time_histogram.flatten())
[0 2 0 0 4 4 4 4 4 0] dimensionless
>>> pdf1 = cpx.pdf()
>>> pdf1 # noqa
<AnalogSignal(array([[0.66666667],
[0. ],
[0.16666667],
[0. ],
[0.16666667]]) * dimensionless, [0.0 dimensionless, 5.0 dimensionless], sampling rate: 1.0 dimensionless)>
>>> pdf1.magnitude # doctest: +SKIP
array([[0.5],
[0.4],
[0.1]])
"""
def __init__(self, spiketrains,
sampling_rate=None,
bin_size=None,
binary=True,
spread=0,
tolerance=1e-8):
check_neo_consistency(spiketrains, object_type=neo.SpikeTrain)
if bin_size is None and sampling_rate is None:
raise ValueError('No bin_size or sampling_rate was specified!')
if spread < 0:
raise ValueError('Spread must be >=0')
self.input_spiketrains = spiketrains
self.t_start = spiketrains[0].t_start
self.t_stop = spiketrains[0].t_stop
self.sampling_rate = sampling_rate
self.bin_size = bin_size
self.binary = binary
self.spread = spread
self.tolerance = tolerance
if bin_size is None and sampling_rate is not None:
self.bin_size = 1 / self.sampling_rate
if spread == 0:
self.time_histogram, self.complexity_histogram = \
self._histogram_no_spread()
self.epoch = self._epoch_no_spread()
else:
self.epoch = self._epoch_with_spread()
self.time_histogram, self.complexity_histogram = \
self._histogram_with_spread()
def pdf(self):
"""
Probability density computed from the complexity histogram.
Returns
-------
pdf : neo.AnalogSignal
A `neo.AnalogSignal` object containing the pdf values.
`neo.AnalogSignal[j]` is the histogram computed between
`t_start + j * binsize` and `t_start + (j + 1) * binsize`.
"""
norm_hist = self.complexity_histogram / self.complexity_histogram.sum()
# Convert the Complexity pdf to an neo.AnalogSignal
pdf = neo.AnalogSignal(
np.expand_dims(norm_hist, axis=1),
units=pq.dimensionless,
t_start=0 * pq.dimensionless,
sampling_period=1 * pq.dimensionless)
return pdf
def _histogram_no_spread(self):
"""
Calculate the complexity histogram and time histogram for `spread` = 0
"""
# Computing the population histogram with parameter binary=True to
# clip the spike trains before summing
time_hist = time_histogram(self.input_spiketrains,
self.bin_size,
binary=self.binary)
time_hist_magnitude = time_hist.magnitude.flatten()
# Computing the histogram of the entries of pophist
complexity_hist = np.bincount(time_hist_magnitude)
return time_hist, complexity_hist
def _histogram_with_spread(self):
"""
Calculate the complexity histogram and time histogram for `spread` > 0
"""
complexity_hist = np.bincount(
self.epoch.array_annotations['complexity'])
num_bins = (self.t_stop - self.t_start).rescale(
self.bin_size.units).item() / self.bin_size.item()
num_bins = round_binning_errors(num_bins, tolerance=self.tolerance)
time_hist = np.zeros(num_bins, dtype=int)
start_bins = (self.epoch.times - self.t_start).rescale(
self.bin_size.units).magnitude / self.bin_size.item()
stop_bins = (self.epoch.times + self.epoch.durations - self.t_start
).rescale(self.bin_size.units
).magnitude / self.bin_size.item()
if self.sampling_rate is not None:
shift = (.5 / self.sampling_rate / self.bin_size).simplified.item()
# account for the first bin not being shifted in the epoch creation
# if the shift would move it past t_start
if self.epoch.times[0] == self.t_start:
start_bins[1:] += shift
else:
start_bins += shift
stop_bins += shift
start_bins = round_binning_errors(start_bins, tolerance=self.tolerance)
stop_bins = round_binning_errors(stop_bins, tolerance=self.tolerance)
for idx, (start, stop) in enumerate(zip(start_bins, stop_bins)):
time_hist[start:stop] = \
self.epoch.array_annotations['complexity'][idx]
time_hist = neo.AnalogSignal(
signal=np.expand_dims(time_hist, axis=1),
sampling_period=self.bin_size, units=pq.dimensionless,
t_start=self.t_start)
empty_bins = (self.t_stop - self.t_start - self.epoch.durations.sum())
empty_bins = empty_bins.rescale(self.bin_size.units
).magnitude / self.bin_size.item()
empty_bins = round_binning_errors(empty_bins, tolerance=self.tolerance)
complexity_hist[0] = empty_bins
return time_hist, complexity_hist
def _epoch_no_spread(self):
"""
Get an epoch object of the complexity distribution with `spread` = 0
"""
left_edges = self.time_histogram.times
durations = self.bin_size * np.ones(self.time_histogram.shape)
if self.sampling_rate:
# ensure that spikes are not on the bin edges
bin_shift = .5 / self.sampling_rate
left_edges -= bin_shift
# Ensure that an epoch does not start before the minimum t_start.
# Note: all spike trains share the same t_start and t_stop.
if left_edges[0] < self.t_start:
left_edges[0] = self.t_start
durations[0] -= bin_shift
else:
warnings.warn('No sampling rate specified. '
'Note that using the complexity epoch to get '
'precise spike times can lead to rounding errors.')
complexity = self.time_histogram.magnitude.flatten()
complexity = complexity.astype(np.uint16)
epoch = neo.Epoch(left_edges,
durations=durations,
array_annotations={'complexity': complexity})
return epoch
def _epoch_with_spread(self):
"""
Get an epoch object of the complexity distribution with `spread` > 0
"""
bst = conv.BinnedSpikeTrain(self.input_spiketrains,
binsize=self.bin_size,
tolerance=self.tolerance)
if self.binary:
bst = bst.binarize(copy=False)
bincount = bst.get_num_of_spikes(axis=0)
nonzero_indices = np.nonzero(bincount)[0]
left_diff = np.diff(nonzero_indices,
prepend=-self.spread - 1)
right_diff = np.diff(nonzero_indices,
append=len(bincount) + self.spread + 1)
# standalone bins (no merging required)
single_bin_indices = np.logical_and(left_diff > self.spread,
right_diff > self.spread)
single_bins = nonzero_indices[single_bin_indices]
# bins separated by fewer than spread bins form clusters
# that have to be merged
cluster_start_indices = np.logical_and(left_diff > self.spread,
right_diff <= self.spread)
cluster_starts = nonzero_indices[cluster_start_indices]
cluster_stop_indices = np.logical_and(left_diff <= self.spread,
right_diff > self.spread)
cluster_stops = nonzero_indices[cluster_stop_indices] + 1
single_bin_complexities = bincount[single_bins]
cluster_complexities = [bincount[start:stop].sum()
for start, stop in zip(cluster_starts,
cluster_stops)]
# merge standalone bins and clusters and sort them
combined_starts = np.concatenate((single_bins, cluster_starts))
combined_stops = np.concatenate((single_bins + 1, cluster_stops))
combined_complexities = np.concatenate((single_bin_complexities,
cluster_complexities))
sorting = np.argsort(combined_starts, kind='mergesort')
left_edges = bst.bin_edges[combined_starts[sorting]]
right_edges = bst.bin_edges[combined_stops[sorting]]
complexities = combined_complexities[sorting].astype(np.uint16)
if self.sampling_rate:
# ensure that spikes are not on the bin edges
bin_shift = .5 / self.sampling_rate
left_edges -= bin_shift
right_edges -= bin_shift
else:
warnings.warn('No sampling rate specified. '
'Note that using the complexity epoch to get '
'precise spike times can lead to rounding errors.')
# Ensure that an epoch does not start before the minimum t_start.
# Note: all spike trains share the same t_start and t_stop.
left_edges[0] = max(self.t_start, left_edges[0])
complexity_epoch = neo.Epoch(times=left_edges,
durations=right_edges - left_edges,
array_annotations={'complexity':
complexities})
return complexity_epoch
def nextpow2(x):
"""
Return the smallest integral power of 2 that is equal or larger than `x`.
"""
log2_n = math.ceil(math.log2(x))
n = 2 ** log2_n
return n
def fftkernel(x, w):
"""
    Applies a Gaussian kernel smoother to an input signal using the FFT algorithm.
Parameters
----------
x : np.ndarray
Vector with sample signal.
w : float
Kernel bandwidth (the standard deviation) in unit of the sampling
resolution of `x`.
Returns
-------
y : np.ndarray
The smoothed signal.
Notes
-----
1. MAY 5/23, 2012 Author Hideaki Shimazaki
       RIKEN Brain Science Institute
http://2000.jukuin.keio.ac.jp/shimazaki
2. Ported to Python: Subhasis Ray, NCBS. Tue Jun 10 10:42:38 IST 2014
"""
L = len(x)
Lmax = L + 3 * w
n = nextpow2(Lmax)
X = np.fft.fft(x, n)
f = np.arange(0, n, 1.0) / n
f = np.concatenate((-f[:int(n / 2)], f[int(n / 2):0:-1]))
K = np.exp(-0.5 * (w * 2 * np.pi * f) ** 2)
y = np.fft.ifft(X * K, n)
y = y[:L].copy()
return y
def logexp(x):
if x < 1e2:
y = np.log(1 + np.exp(x))
else:
y = x
return y
def ilogexp(x):
if x < 1e2:
y = np.log(np.exp(x) - 1)
else:
y = x
return y
def cost_function(x, N, w, dt):
"""
Computes the cost function for `sskernel`.
Cn(w) = sum_{i,j} int k(x - x_i) k(x - x_j) dx - 2 sum_{i~=j} k(x_i - x_j)
"""
yh = np.abs(fftkernel(x, w / dt)) # density
# formula for density
C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \
dt + 2 / np.sqrt(2 * np.pi) / w / N
C = C * N * N
# formula for rate
# C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )
return C, yh
@deprecated_alias(tin='times', w='bandwidth')
def optimal_kernel_bandwidth(spiketimes, times=None, bandwidth=None,
bootstrap=False):
"""
Calculates optimal fixed kernel bandwidth
:cite:`statistics-Shimazaki2010_171`, given as the standard deviation
sigma.
Original matlab code (sskernel.m)
http://2000.jukuin.keio.ac.jp/shimazaki/res/kernel.html has been ported to
Python by Subhasis Ray, NCBS.
Parameters
----------
spiketimes : np.ndarray
Sequence of spike times (sorted to be ascending).
times : np.ndarray or None, optional
Time points at which the kernel bandwidth is to be estimated.
If None, `spiketimes` is used.
Default: None
bandwidth : np.ndarray or None, optional
Vector of kernel bandwidths (standard deviation sigma).
If specified, optimal bandwidth is selected from this.
If None, `bandwidth` is obtained through a golden-section search on a
log-exp scale.
Default: None
bootstrap : bool, optional
If True, calculates the 95% confidence interval using Bootstrap.
Default: False
Returns
-------
dict
'y' : np.ndarray
Estimated density.
't' : np.ndarray
Points at which estimation was computed.
'optw' : float
Optimal kernel bandwidth given as standard deviation sigma
'w' : np.ndarray
Kernel bandwidths examined (standard deviation sigma).
'C' : np.ndarray
Cost functions of `bandwidth`.
'confb95' : tuple of np.ndarray
Bootstrap 95% confidence interval: (lower level, upper level).
If `bootstrap` is False, `confb95` is None.
'yb' : np.ndarray
Bootstrap samples.
If `bootstrap` is False, `yb` is None.
If no optimal kernel could be found, all entries of the dictionary are
set to None.
"""
if times is None:
time = np.max(spiketimes) - np.min(spiketimes)
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
times = np.linspace(np.min(spiketimes),
np.max(spiketimes),
min(int(time / dt + 0.5),
1000)) # The 1000 seems somewhat arbitrary
t = times
else:
time = np.max(times) - np.min(times)
spiketimes = spiketimes[(spiketimes >= np.min(times)) &
(spiketimes <= np.max(times))].copy()
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
if dt > np.min(np.diff(times)):
t = np.linspace(np.min(times), np.max(times),
min(int(time / dt + 0.5), 1000))
else:
t = times
dt = np.min(np.diff(times))
yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2])
N = np.sum(yhist)
yhist = yhist / (N * dt) # density
optw = None
y = None
if bandwidth is not None:
C = np.zeros(len(bandwidth))
Cmin = np.inf
for k, w_ in enumerate(bandwidth):
C[k], yh = cost_function(yhist, N, w_, dt)
if C[k] < Cmin:
Cmin = C[k]
optw = w_
y = yh
else:
# Golden section search on a log-exp scale
wmin = 2 * dt
wmax = max(spiketimes) - min(spiketimes)
imax = 20 # max iterations
bandwidth = np.zeros(imax)
C = np.zeros(imax)
tolerance = 1e-5
phi = 0.5 * (np.sqrt(5) + 1) # The Golden ratio
a = ilogexp(wmin)
b = ilogexp(wmax)
c1 = (phi - 1) * a + (2 - phi) * b
c2 = (2 - phi) * a + (phi - 1) * b
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
k = 0
while (np.abs(b - a) > (tolerance * (np.abs(c1) + np.abs(c2)))) \
and (k < imax):
if f1 < f2:
b = c2
c2 = c1
c1 = (phi - 1) * a + (2 - phi) * b
f2 = f1
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
bandwidth[k] = logexp(c1)
C[k] = f1
optw = logexp(c1)
y = y1 / (np.sum(y1 * dt))
else:
a = c1
c1 = c2
c2 = (2 - phi) * a + (phi - 1) * b
f1 = f2
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
bandwidth[k] = logexp(c2)
C[k] = f2
optw = logexp(c2)
y = y2 / np.sum(y2 * dt)
k = k + 1
# Bootstrap confidence intervals
confb95 = None
yb = None
# If bootstrap is requested, and an optimal kernel was found
if bootstrap and optw:
nbs = 1000
yb = np.zeros((nbs, len(times)))
for ii in range(nbs):
idx = np.floor(np.random.rand(N) * N).astype(int)
xb = spiketimes[idx]
            y_histb, bins = np.histogram(
                xb, np.r_[t - dt / 2, t[-1] + dt / 2])
            y_histb = y_histb / dt / N
yb_buf = fftkernel(y_histb, optw / dt).real
yb_buf = yb_buf / np.sum(yb_buf * dt)
yb[ii, :] = np.interp(times, t, yb_buf)
ybsort = np.sort(yb, axis=0)
y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
confb95 = (y95b, y95u)
# Only perform interpolation if y could be calculated
if y is not None:
y = np.interp(times, t, y)
return {'y': y,
't': times,
'optw': optw,
'w': bandwidth,
'C': C,
'confb95': confb95,
'yb': yb}
def sskernel(*args, **kwargs):
warnings.warn("'sskernel' function is deprecated; "
"use 'optimal_kernel_bandwidth'", DeprecationWarning)
return optimal_kernel_bandwidth(*args, **kwargs)
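# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of calling optimal_kernel_bandwidth() on synthetic
# spike times; the values below are made up and the result keys follow the
# docstring above.
if __name__ == '__main__':
    import numpy as np  # already imported at module level; repeated here for clarity
    example_spikes = np.cumsum(np.random.exponential(scale=0.05, size=200))
    kernel_result = optimal_kernel_bandwidth(example_spikes)
    if kernel_result['optw'] is not None:
        print('optimal kernel bandwidth (sigma):', kernel_result['optw'])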
|
0bd6d7757a1b9d0209c2a80aa0f608155b920931
|
ae72088480e7c2112a5465fadac23a14fbd0ede9
|
/zookeeper-contrib/zookeeper-contrib-monitoring/check_zookeeper.py
|
0a08e84a7cc876c67463c6b7342ea03364eeac34
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/zookeeper
|
f4a45cf12957a8da03c9f1f88f6194e7ce11d6d7
|
03a36d08e257c43e8377e5549d5524805fc6b8bb
|
refs/heads/master
| 2023-08-31T17:46:25.779817
| 2023-08-30T03:25:41
| 2023-08-30T03:25:41
| 160,999
| 12,080
| 7,860
|
Apache-2.0
| 2023-09-10T14:36:38
| 2009-03-27T15:41:56
|
Java
|
UTF-8
|
Python
| false
| false
| 13,057
|
py
|
check_zookeeper.py
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Check Zookeeper Cluster
Generic monitoring script that could be used with multiple platforms (Ganglia, Nagios, Cacti).
It requires ZooKeeper 3.4.0 or greater. The script needs the 'mntr' 4letter word
command (patch ZOOKEEPER-744), which has now been committed to the trunk.
The script also works with ZooKeeper 3.3.x but in a limited way.
"""
import sys
import socket
import logging
import re
import subprocess
from StringIO import StringIO
from optparse import OptionParser, OptionGroup
__version__ = (0, 1, 0)
log = logging.getLogger()
logging.basicConfig(level=logging.ERROR)
class NagiosHandler(object):
@classmethod
def register_options(cls, parser):
group = OptionGroup(parser, 'Nagios specific options')
group.add_option('-w', '--warning', dest='warning')
group.add_option('-c', '--critical', dest='critical')
parser.add_option_group(group)
def analyze(self, opts, cluster_stats):
try:
warning = int(opts.warning)
critical = int(opts.critical)
except (TypeError, ValueError):
print >>sys.stderr, 'Invalid values for "warning" and "critical".'
return 2
if opts.key is None:
print >>sys.stderr, 'You should specify a key name.'
return 2
warning_state, critical_state, values = [], [], []
for host, stats in cluster_stats.items():
if opts.key in stats:
value = stats[opts.key]
values.append('%s=%s;%s;%s' % (host, value, warning, critical))
if warning >= value > critical or warning <= value < critical:
warning_state.append(host)
elif (warning < critical and critical <= value) or (warning > critical and critical >= value):
critical_state.append(host)
if not values:
# Zookeeper may be down, not serving requests or we may have a bad configuration
print 'Critical, %s not found' % opts.key
return 2
values = ' '.join(values)
if critical_state:
print 'Critical "%s" %s!|%s' % (opts.key, ', '.join(critical_state), values)
return 2
elif warning_state:
print 'Warning "%s" %s!|%s' % (opts.key, ', '.join(warning_state), values)
return 1
else:
print 'Ok "%s"!|%s' % (opts.key, values)
return 0
class CactiHandler(object):
@classmethod
def register_options(cls, parser):
group = OptionGroup(parser, 'Cacti specific options')
group.add_option('-l', '--leader', dest='leader',
action="store_true", help="only query the cluster leader")
parser.add_option_group(group)
def analyze(self, opts, cluster_stats):
if opts.key is None:
print >>sys.stderr, 'The key name is mandatory.'
return 1
if opts.leader is True:
try:
leader = [x for x in cluster_stats.values() \
if x.get('zk_server_state', '') == 'leader'][0]
except IndexError:
print >>sys.stderr, 'No leader found.'
return 3
if opts.key in leader:
print leader[opts.key]
return 0
else:
print >>sys.stderr, 'Unknown key: "%s"' % opts.key
return 2
else:
for host, stats in cluster_stats.items():
if opts.key not in stats:
continue
host = host.replace(':', '_')
print '%s:%s' % (host, stats[opts.key]),
class GangliaHandler(object):
@classmethod
def register_options(cls, parser):
group = OptionGroup(parser, 'Ganglia specific options')
group.add_option('-g', '--gmetric', dest='gmetric',
default='/usr/bin/gmetric', help='ganglia gmetric binary '\
'location: /usr/bin/gmetric')
parser.add_option_group(group)
def call(self, *args, **kwargs):
subprocess.call(*args, **kwargs)
def analyze(self, opts, cluster_stats):
if len(cluster_stats) != 1:
print >>sys.stderr, 'Only allowed to monitor a single node.'
return 1
for host, stats in cluster_stats.items():
for k, v in stats.items():
try:
self.call([opts.gmetric, '-n', k, '-v', str(int(v)), '-t', 'uint32'])
except (TypeError, ValueError):
pass
class ZooKeeperServer(object):
def __init__(self, host='localhost', port='2181', timeout=1):
self._address = (host, int(port))
self._timeout = timeout
def get_stats(self):
""" Get ZooKeeper server stats as a map """
data = self._send_cmd('mntr')
stat = self._parse_stat(self._send_cmd('stat'))
if data:
mntr = self._parse(data)
missing = ['zk_zxid', 'zk_zxid_counter', 'zk_zxid_epoch']
for m in missing:
if m in stat:
mntr[m] = stat[m]
return mntr
else:
return stat
def _create_socket(self):
return socket.socket()
def _send_cmd(self, cmd):
""" Send a 4letter word command to the server """
s = self._create_socket()
s.settimeout(self._timeout)
s.connect(self._address)
s.send(cmd)
data = s.recv(2048)
s.close()
return data
def _parse(self, data):
""" Parse the output from the 'mntr' 4letter word command """
h = StringIO(data)
result = {}
for line in h.readlines():
try:
key, value = self._parse_line(line)
result[key] = value
except ValueError:
pass # ignore broken lines
return result
def _parse_stat(self, data):
""" Parse the output from the 'stat' 4letter word command """
h = StringIO(data)
result = {}
version = h.readline()
if version:
result['zk_version'] = version[version.index(':')+1:].strip()
# skip all lines until we find the empty one
while h.readline().strip(): pass
for line in h.readlines():
m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
if m is not None:
result['zk_min_latency'] = int(m.group(1))
result['zk_avg_latency'] = int(m.group(2))
result['zk_max_latency'] = int(m.group(3))
continue
m = re.match('Received: (\d+)', line)
if m is not None:
result['zk_packets_received'] = int(m.group(1))
continue
m = re.match('Sent: (\d+)', line)
if m is not None:
result['zk_packets_sent'] = int(m.group(1))
continue
m = re.match('Alive connections: (\d+)', line)
if m is not None:
result['zk_num_alive_connections'] = int(m.group(1))
continue
m = re.match('Outstanding: (\d+)', line)
if m is not None:
result['zk_outstanding_requests'] = int(m.group(1))
continue
m = re.match('Mode: (.*)', line)
if m is not None:
result['zk_server_state'] = m.group(1)
continue
m = re.match('Node count: (\d+)', line)
if m is not None:
result['zk_znode_count'] = int(m.group(1))
continue
m = re.match('Watch count: (\d+)', line)
if m is not None:
result['zk_watch_count'] = int(m.group(1))
continue
m = re.match('Ephemerals count: (\d+)', line)
if m is not None:
result['zk_ephemerals_count'] = int(m.group(1))
continue
m = re.match('Approximate data size: (\d+)', line)
if m is not None:
result['zk_approximate_data_size'] = int(m.group(1))
continue
m = re.match('Open file descriptor count: (\d+)', line)
if m is not None:
result['zk_open_file_descriptor_count'] = int(m.group(1))
continue
m = re.match('Max file descriptor count: (\d+)', line)
if m is not None:
result['zk_max_file_descriptor_count'] = int(m.group(1))
continue
m = re.match('Zxid: (0x[0-9a-fA-F]+)', line)
if m is not None:
result['zk_zxid'] = m.group(1)
result['zk_zxid_counter'] = int(m.group(1), 16) & int('0xffffffff', 16) # lower 32 bits
result['zk_zxid_epoch'] = int(m.group(1), 16) >>32 # high 32 bits
continue
m = re.match('Proposal sizes last/min/max: (\d+)/(\d+)/(\d+)', line)
if m is not None:
result['zk_last_proposal_size'] = int(m.group(1))
result['zk_min_proposal_size'] = int(m.group(2))
result['zk_max_proposal_size'] = int(m.group(3))
continue
return result
def _parse_line(self, line):
try:
key, value = map(str.strip, line.split('\t'))
except ValueError:
raise ValueError('Found invalid line: %s' % line)
if not key:
raise ValueError('The key is mandatory and should not be empty')
for typ in [int, float]:
try:
value = typ(value)
break
except (TypeError, ValueError):
pass
return key, value
def main():
opts, args = parse_cli()
cluster_stats = get_cluster_stats(opts.servers)
if opts.output is None:
dump_stats(cluster_stats)
return 0
handler = create_handler(opts.output)
if handler is None:
log.error('undefined handler: %s' % opts.output)
sys.exit(1)
return handler.analyze(opts, cluster_stats)
def create_handler(name):
""" Return an instance of a platform specific analyzer """
try:
return globals()['%sHandler' % name.capitalize()]()
except KeyError:
return None
def get_all_handlers():
""" Get a list containing all the platform specific analyzers """
return [NagiosHandler, CactiHandler, GangliaHandler]
def dump_stats(cluster_stats):
""" Dump cluster statistics in an user friendly format """
for server, stats in cluster_stats.items():
print 'Server:', server
for key, value in stats.items():
print "%30s" % key, ' ', value
print
def get_cluster_stats(servers):
""" Get stats for all the servers in the cluster """
stats = {}
for host, port in servers:
try:
zk = ZooKeeperServer(host, port)
stats["%s:%s" % (host, port)] = zk.get_stats()
except socket.error, e:
# ignore because the cluster can still work even
# if some servers fail completely
# this error should be also visible in a variable
# exposed by the server in the statistics
logging.info('unable to connect to server '\
'"%s" on port "%s"' % (host, port))
return stats
def get_version():
return '.'.join(map(str, __version__))
def parse_cli():
parser = OptionParser(usage='./check_zookeeper.py <options>', version=get_version())
parser.add_option('-s', '--servers', dest='servers',
help='a list of SERVERS', metavar='SERVERS')
parser.add_option('-o', '--output', dest='output',
help='output HANDLER: nagios, ganglia, cacti', metavar='HANDLER')
parser.add_option('-k', '--key', dest='key')
for handler in get_all_handlers():
handler.register_options(parser)
opts, args = parser.parse_args()
if opts.servers is None:
parser.error('The list of servers is mandatory')
opts.servers = [s.split(':') for s in opts.servers.split(',')]
return (opts, args)
if __name__ == '__main__':
sys.exit(main())
|
6b3c9e957c6a982e7d90549c482b009a1586a8b0
|
7260860cc391503e839929d77722004d17e47681
|
/django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py
|
d15639bdb00de04171288dc6983956b0e524fc81
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
paulocheque/django-dynamic-fixture
|
5937e144a4504ab84e05da673c7bf3ce90ad9d1e
|
5b5ffaca3bef3640feb2031feb3e895971b3b939
|
refs/heads/master
| 2023-08-30T20:56:47.857000
| 2023-08-27T01:29:16
| 2023-08-27T01:29:16
| 3,934,109
| 212
| 47
|
NOASSERTION
| 2023-09-05T20:29:14
| 2012-04-04T22:54:41
|
Python
|
UTF-8
|
Python
| false
| false
| 9,709
|
py
|
test_ddf_teaching_and_lessons.py
|
import re
from django.test import TestCase
import pytest
from django_dynamic_fixture.models_test import *
from django_dynamic_fixture.ddf import *
from django_dynamic_fixture.fixture_algorithms.sequential_fixture import SequentialDataFixture
data_fixture = SequentialDataFixture()
class DDFTestCase(TestCase):
def setUp(self):
self.ddf = DynamicFixture(data_fixture)
DDFLibrary.get_instance().clear()
class TeachAndLessonsTest(DDFTestCase):
def test_teach_a_default_lesson_for_a_model(self):
self.ddf.teach(ModelForLibrary, integer=1000)
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1000
def test_default_lesson_may_be_overrided_although_it_is_an_anti_pattern(self):
self.ddf.teach(ModelForLibrary, integer=1000)
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1000
self.ddf.teach(ModelForLibrary, integer=1001)
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1001
def test_it_must_NOT_raise_an_error_if_user_try_to_use_a_not_saved_default_configuration(self):
self.ddf.get(ModelForLibrary)
def test_it_must_raise_an_error_if_try_to_set_a_static_value_to_a_field_with_unicity(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.teach(ModelForLibrary, integer_unique=1000)
def test_it_allows_to_use_masks_as_lessons_for_unique_integer_fields(self):
self.ddf.teach(ModelForLibrary, integer_unique=Mask('1###'))
instance = self.ddf.get(ModelForLibrary)
assert 1000 <= int(instance.integer_unique) <= 1999
def test_it_allows_to_use_masks_as_lessons_for_unique_char_fields(self):
self.ddf.teach(ModelWithUniqueCharField, text_unique=Mask('---- ### __'))
instance = self.ddf.get(ModelWithUniqueCharField)
assert re.match(r'[A-Z]{4} [0-9]{3} [a-z]{2}', instance.text_unique)
def test_it_must_accept_dynamic_values_for_fields_with_unicity(self):
self.ddf.teach(ModelForLibrary, integer_unique=lambda field: 1000)
def test_it_must_NOT_propagate_lessons_for_internal_dependencies(self):
self.ddf.teach(ModelForLibrary, foreignkey=DynamicFixture(data_fixture, integer=1000))
instance = self.ddf.get(ModelForLibrary)
assert instance.integer != 1000
assert instance.foreignkey.integer == 1000
def test_it_must_use_lessons_for_internal_dependencies(self):
# ModelForLibrary.foreignkey is a `ModelForLibrary2`
self.ddf.teach(ModelForLibrary, integer=1000)
self.ddf.teach(ModelForLibrary2, integer=1001)
instance = self.ddf.get(ModelForLibrary, foreignkey=DynamicFixture(data_fixture))
assert instance.integer == 1000
assert instance.foreignkey.integer == 1001
# Not implemented yet
# def test_teaching_must_store_ddf_configs_too(self):
# self.ddf.teach(ModelForLibrary, fill_nullable_fields=False)
# instance = self.ddf.get(ModelForLibrary)
# assert instance.integer is None
# DDFLibrary.get_instance().clear()
# self.ddf.teach(ModelForLibrary, fill_nullable_fields=True)
# instance = self.ddf.get(ModelForLibrary)
# assert instance.integer is not None
# Not implemented yet
# def test_teaching_ddf_configs_must_NOT_be_propagated_to_another_models(self):
# self.ddf.teach(ModelForLibrary, fill_nullable_fields=False)
# instance = self.ddf.get(ModelForLibrary)
# assert instance.integer is None
# assert instance.foreignkey.integer is None
# DDFLibrary.get_instance().clear()
# self.ddf.teach(ModelForLibrary, fill_nullable_fields=True)
# instance = self.ddf.get(ModelForLibrary)
# assert instance.integer is not None
# assert instance.foreignkey.integer is None # not populated
class TeachingAndCustomLessonsTest(DDFTestCase):
def test_a_model_can_have_custom_lessons(self):
self.ddf.teach(ModelForLibrary, integer=1000, ddf_lesson=None)
self.ddf.teach(ModelForLibrary, integer=1001, ddf_lesson='a name')
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1000
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name')
assert instance.integer == 1001
def test_custom_lessons_must_not_be_used_if_not_explicity_specified(self):
self.ddf.teach(ModelForLibrary, integer=1000, ddf_lesson='a name')
instance = self.ddf.get(ModelForLibrary)
assert instance.integer != 1000
def test_a_model_can_have_many_custom_lessons(self):
self.ddf.teach(ModelForLibrary, integer=1000, ddf_lesson='a name')
self.ddf.teach(ModelForLibrary, integer=1001, ddf_lesson='a name 2')
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name')
assert instance.integer == 1000
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name 2')
assert instance.integer == 1001
def test_it_must_raise_an_error_if_user_try_to_use_a_not_saved_configuration(self):
with pytest.raises(InvalidConfigurationError):
self.ddf.get(ModelForLibrary, ddf_lesson='a not teached lesson')
def test_default_lesson_and_custom_lesson_must_work_together(self):
# regression test
self.ddf.teach(ModelForLibrary, integer=1000, ddf_lesson='a name')
self.ddf.teach(ModelForLibrary, integer=1001, ddf_lesson=True)
self.ddf.teach(ModelForLibrary, integer=1002, ddf_lesson='a name2')
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name')
assert instance.integer == 1000
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1001
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name2')
assert instance.integer == 1002
def test_default_lesson_and_custom_lesson_must_work_together_for_different_models(self):
# regression test
self.ddf.teach(ModelForLibrary, integer=1000, ddf_lesson='a name')
self.ddf.teach(ModelForLibrary, integer=1001, ddf_lesson=True)
self.ddf.teach(ModelForLibrary, integer=1002, ddf_lesson='a name2')
self.ddf.teach(ModelForLibrary2, integer=2000, ddf_lesson='a name')
self.ddf.teach(ModelForLibrary2, integer=2001, ddf_lesson=True)
self.ddf.teach(ModelForLibrary2, integer=2002, ddf_lesson='a name2')
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name')
assert instance.integer == 1000
instance = self.ddf.get(ModelForLibrary)
assert instance.integer == 1001
instance = self.ddf.get(ModelForLibrary, ddf_lesson='a name2')
assert instance.integer == 1002
instance = self.ddf.get(ModelForLibrary2, ddf_lesson='a name')
assert instance.integer == 2000
instance = self.ddf.get(ModelForLibrary2)
assert instance.integer == 2001
instance = self.ddf.get(ModelForLibrary2, ddf_lesson='a name2')
assert instance.integer == 2002
class DDFLibraryTest(TestCase):
def setUp(self):
self.lib = DDFLibrary()
def test_add_and_get_configuration_without_string_name(self):
self.lib.add_configuration(ModelForLibrary, {'a': 1})
assert self.lib.get_configuration(ModelForLibrary) == {'a': 1}
assert self.lib.get_configuration(ModelForLibrary, name=DDFLibrary.DEFAULT_KEY) == {'a': 1}
assert self.lib.get_configuration(ModelForLibrary, name=None) == {'a': 1}
self.lib.clear()
self.lib.add_configuration(ModelForLibrary, {'a': 2}, name=None)
assert self.lib.get_configuration(ModelForLibrary) == {'a': 2}
assert self.lib.get_configuration(ModelForLibrary, name=DDFLibrary.DEFAULT_KEY) == {'a': 2}
assert self.lib.get_configuration(ModelForLibrary, name=None) == {'a': 2}
self.lib.clear()
self.lib.add_configuration(ModelForLibrary, {'a': 3}, name=True)
assert self.lib.get_configuration(ModelForLibrary) == {'a': 3}
assert self.lib.get_configuration(ModelForLibrary, name=DDFLibrary.DEFAULT_KEY) == {'a': 3}
assert self.lib.get_configuration(ModelForLibrary, name=None) == {'a': 3}
def test_add_and_get_configuration_with_name(self):
self.lib.add_configuration(ModelForLibrary, {'a': 1}, name='x')
assert self.lib.get_configuration(ModelForLibrary, name='x') == {'a': 1}
def test_clear_config(self):
self.lib.clear_configuration(ModelForLibrary) # run ok if empty
self.lib.add_configuration(ModelForLibrary, {'a': 1})
self.lib.add_configuration(ModelForLibrary, {'a': 2}, name='x')
self.lib.add_configuration(ModelForLibrary2, {'a': 3})
self.lib.clear_configuration(ModelForLibrary)
assert self.lib.get_configuration(ModelForLibrary) == {}
with pytest.raises(Exception):
self.lib.get_configuration(ModelForLibrary, name='x')
assert self.lib.get_configuration(ModelForLibrary2) == {'a': 3}
def test_clear(self):
self.lib.add_configuration(ModelForLibrary, {'a': 1})
self.lib.add_configuration(ModelForLibrary, {'a': 2}, name='x')
self.lib.add_configuration(ModelForLibrary2, {'a': 3})
self.lib.add_configuration(ModelForLibrary2, {'a': 4}, name='x')
self.lib.clear()
assert self.lib.get_configuration(ModelForLibrary) == {}
with pytest.raises(Exception):
self.lib.get_configuration(ModelForLibrary, name='x')
assert self.lib.get_configuration(ModelForLibrary2) == {}
with pytest.raises(Exception):
self.lib.get_configuration(ModelForLibrary2, name='x')
|
d43486d879a17b344dac1a686a5af4cc0cf20017
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/interpreter/commands/aquant.py
|
1bb479f6892a642e5c45415428eea1a28f345963
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 5,458
|
py
|
aquant.py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import glob
import logging
import pickle
from pathlib import Path
from cmd2 import Cmd2ArgumentParser, with_argparser
from cmd2.cmd2 import Cmd
from nntool.interpreter.commands.qtune import load_options
from nntool.interpreter.nntool_shell_base import (NNToolShellBase,
store_once_in_history)
from nntool.interpreter.shell_utils import glob_input_files, input_options
from nntool.quantization.handlers_helpers import (add_options_to_parser,
get_options_from_args)
from nntool.quantization.quantizer.new_quantizer import NewQuantizer
from nntool.utils.data_importer import import_data
from nntool.utils.stats_funcs import STATS_BITS
from nntool.graph.types import ConstantInputNode
from nntool.stats.activation_ranges_collector import ActivationRangesCollector
LOG = logging.getLogger(__name__)
QUANTIZATION_SCHEMES = ['SQ8', 'POW2', 'FLOAT']
class AquantCommand(NNToolShellBase):
# AQUANT COMMAND
parser_aquant = Cmd2ArgumentParser()
parser_aquant.add_argument('-f',
'--force_width',
choices=STATS_BITS, type=int, default=0,
help='force all layers to this bit-width in case of POW2 scheme, ' +
'SQ8 will automatically force 8-bits')
parser_aquant.add_argument('-s', '--scheme',
type=str, choices=QUANTIZATION_SCHEMES, default='SQ8',
help='quantize with scaling factors (TFlite quantization-like) [default] or POW2')
parser_aquant.add_argument('--stats',
completer_method=Cmd.path_complete,
help='pickle file containing statistics')
parser_aquant.add_argument('--json',
completer_method=Cmd.path_complete,
                               help='json file containing saved quantization options using qtunesave command')
add_options_to_parser(parser_aquant)
input_options(parser_aquant)
@with_argparser(parser_aquant)
@store_once_in_history
def do_aquant(self, args: argparse.Namespace):
"""
        Attempt to calculate quantization for the graph using one or more sample input files."""
self._check_graph()
stats_collector = ActivationRangesCollector()
# if replaying state file then load the activation stats if they are present
graph_options = get_options_from_args(args)
node_options = {}
if args.json:
json_path = Path(args.json)
if not json_path.exists() or not json_path.is_file():
self.perror(f'{json_path} does not exist or is not a file')
return
loaded_graph_options, loaded_node_options = load_options(json_path)
loaded_graph_options.update(graph_options)
for nid, opts in loaded_node_options.items():
node_options.setdefault(nid, {}).update(opts)
graph_options = loaded_graph_options
state = ConstantInputNode.save_compression_state(self.G)
try:
if args.stats:
stats_file = glob.glob(args.stats)
stats_file = stats_file[0] if stats_file else args.stats
with open(stats_file, 'rb') as file_pointer:
astats = pickle.load(file_pointer)
elif self.replaying_history and self.history_stats:
astats = self.history_stats
else:
input_args = self._get_input_args(args)
processed_input = False
for file_per_input in glob_input_files(args.input_files, self.G.num_inputs):
LOG.debug("input file %s", file_per_input)
processed_input = True
data = [import_data(input_file, **input_args)
for input_file in file_per_input]
stats_collector.collect_stats(self.G, data)
if not processed_input:
self.perror("No input files found")
return
astats = stats_collector.stats
self._record_stats(astats)
if args.force_width:
graph_options['bits'] = args.force_width
quantizer = NewQuantizer(self.G)
quantizer.schemes.append(args.scheme)
quantizer.set_stats(astats, graph_options, node_options)
quantizer.quantize()
self.G.add_dimensions()
LOG.info("Quantization set. Use qshow command to see it.")
finally:
ConstantInputNode.restore_compression_state(self.G, state)
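# --- Usage sketch (editor's addition, not part of the original file) ---
# Hypothetical nntool shell invocations of the command defined above; file
# names are placeholders, the flags match parser_aquant.
#
#   (nntool) aquant -s SQ8 --stats activation_stats.pickle
#   (nntool) aquant -s POW2 -f 16 calibration_images/*.ppm
#   (nntool) aquant --json tuned_options.json calibration_images/*.ppm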
|
1db4ea7478160606b4eea480aef773b7ed773e5a
|
f45e5600447a7ee2d4f6ba7c1ebdce5299a283aa
|
/test_project/views.py
|
6bcb6da98f7ef19d7867947d95da2a4854d50d40
|
[
"MIT"
] |
permissive
|
GeeWee/django-auto-prefetching
|
bad658b1640afddb0c1abd70365128ea309651c5
|
a7e1936961bb095f18ca53442ee558ce4fc82942
|
refs/heads/master
| 2023-08-11T10:13:21.972061
| 2023-08-02T09:36:10
| 2023-08-02T09:36:10
| 181,546,261
| 238
| 23
|
MIT
| 2023-08-02T09:36:12
| 2019-04-15T18:44:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
views.py
|
# Create your views here.
from rest_framework.viewsets import ModelViewSet
from django_auto_prefetching import AutoPrefetchViewSetMixin
import django_auto_prefetching
from test_project.serializers.child_a_serializer import ChildASerializer
from test_project.serializers.child_b_serializers import ChildBSerializer
from test_project.serializers.many_to_many_serializer import (
ManyTwoSerializerOnlyFullRepresentation,
)
from test_project.models import ManyToManyModelTwo, ChildB, ChildA
class ManyTwoSerializerOnlyFullRepresentationViewSet(
AutoPrefetchViewSetMixin, ModelViewSet
):
serializer_class = ManyTwoSerializerOnlyFullRepresentation
queryset = ManyToManyModelTwo.objects.all()
class ChildBViewSet(ModelViewSet):
serializer_class = ChildBSerializer
queryset = ChildB.objects.all()
class WrongQuerySetOverride(AutoPrefetchViewSetMixin, ModelViewSet):
serializer_class = ChildASerializer
def get_queryset(self):
return ChildA.objects.all()
class RightQuerySetOverride(AutoPrefetchViewSetMixin, ModelViewSet):
serializer_class = ChildASerializer
def get_queryset(self):
qs = ChildA.objects.all()
return django_auto_prefetching.prefetch(qs, self.serializer_class)
class GetPrefetchableQuerysetOverride(AutoPrefetchViewSetMixin, ModelViewSet):
serializer_class = ChildASerializer
def get_prefetchable_queryset(self):
return ChildA.objects.filter(childA_text='text_1')
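# Editor's note (sketch): the three ChildA viewsets above contrast ways of
# overriding the queryset -- WrongQuerySetOverride returns a plain queryset
# from get_queryset(), RightQuerySetOverride re-applies
# django_auto_prefetching.prefetch(qs, self.serializer_class) by hand, and
# GetPrefetchableQuerysetOverride relies on the mixin's
# get_prefetchable_queryset() hook instead.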
|
32f24b71b5fa89a6bc6a162688a101103effbf1f
|
bcabfb2a3cfb7a06004774dcf1a64412f0e70738
|
/setting.py
|
966ee270eb44c537b38f0d8b7cd6490a54b3f889
|
[
"Apache-2.0"
] |
permissive
|
CarryChang/Customer_Satisfaction_Analysis
|
afea9cd90d1df0f37ea551daa94a6461e871bc9f
|
ff3c99bdb567fa648cc5e71a38e2b5d0db0010e2
|
refs/heads/master
| 2023-01-11T15:35:14.414156
| 2022-12-27T10:50:46
| 2022-12-27T10:50:46
| 162,680,457
| 403
| 122
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
setting.py
|
# -*- coding: utf-8 -*-
# @USER: CarryChang
# Set the vocabulary (dictionary) size to n_features
n_features = 3000
# Path for storing the sentence-segmented text
sentence_cut_path = 'data/sentence_cut.txt'
# Folder for topic sentences
topic_path = 'topic_text'
# Dictionary-based lookup (topic keyword sets)
topic_words_list = {
'环境': {'环境', '周边', '风景', '空气', '江景', '小区', '景点', '夜景', '街', '周围', '景区', '声音', '景色'},
'价格': {'价格', '房价', '性价比', '价位', '单价', '价钱'},
'特色': {'特色', '装潢', '布置', '建筑', '结构', '格调', '装修', '设计', '风格', '隔音'},
'设施': {'设施', '设备', '条件', '硬件', '房间', '热水', '马桶', '电梯', '阳台', '卫生间', '洗手间', '空调', '被子', '床', '大厅', '电话', '电', '摆设'},
'餐饮': {'餐饮', '早餐', '咖啡', '味道', '饭', '菜', '水果', '特产', '餐', '美食', '烧烤', '宵夜', '食材', '饭馆', '小吃'},
'交通': {'交通', '车程', '地段', '路程', '停车', '机场', '离', '车站', '地理', '位置', '地理', '中心', '海拔', '码头'},
'服务': {'服务', '态度', '前台', '服务员', '老板', '掌柜', '店家', '工作人员'},
'体验': {'体验', '整体', '感觉'},
}
# Folder for storing sentiment polarity plots
topic_emotion_pic = 'topic_emotion_pic'
# Maximum sentence length
maxlen = 100
# Maximum tokenizer vocabulary size
max_words = 1000
# Embedding dimension
embedding_dim = 300
# train_method: model training method; default 'textcnn', alternatives: 'bilstm', 'gru'
train_method = 'textcnn'
# Path where the trained model is saved; used later for inference
sa_model_path_m = 'model_saved/model.h5'
# Save the tokenizer offline for later reuse
tokenize_path = 'model_saved/tokenizer.pickle'
|
2317e3df6d65ab3ec5cfd324812303fefa84264e
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/frontends/console/args.py
|
20b79f9a9d3ae6c155320c6c1a03de9c4bd06025
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
args.py
|
import argparse
from typing import NamedTuple
from tic_tac_toe.game.players import (
MinimaxComputerPlayer,
Player,
RandomComputerPlayer,
)
from tic_tac_toe.logic.models import Mark
from .players import ConsolePlayer
PLAYER_CLASSES = {
"human": ConsolePlayer,
"random": RandomComputerPlayer,
"minimax": MinimaxComputerPlayer,
}
class Args(NamedTuple):
player1: Player
player2: Player
starting_mark: Mark
def parse_args() -> Args:
parser = argparse.ArgumentParser()
parser.add_argument(
"-X",
dest="player_x",
choices=PLAYER_CLASSES.keys(),
default="human",
)
parser.add_argument(
"-O",
dest="player_o",
choices=PLAYER_CLASSES.keys(),
default="minimax",
)
parser.add_argument(
"--starting",
dest="starting_mark",
choices=Mark,
type=Mark,
default="X",
)
args = parser.parse_args()
player1 = PLAYER_CLASSES[args.player_x](Mark("X"))
player2 = PLAYER_CLASSES[args.player_o](Mark("O"))
if args.starting_mark == "O":
player1, player2 = player2, player1
return Args(player1, player2, args.starting_mark)
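# --- Usage sketch (editor's addition, not part of the original file) ---
# parse_args() is normally driven from a console entry point; the module path
# below is a placeholder, but the flags and choices match the parser above.
#
#   python -m frontends.console -X human -O minimax --starting O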
|
c3f4237fe7e976f689bce910976edebb9a51ca36
|
8eb4e39e33969389aca96a79b56f298abcca6872
|
/setup.py
|
0297e7467f8ee603876d159d16604a2a6442cd6a
|
[
"BSD-3-Clause"
] |
permissive
|
cpplint/cpplint
|
8e4084109e0512dc8d196e178120ba2d09bc9edc
|
7b88b68187e3516540fab3caa900988d2179ed24
|
refs/heads/develop
| 2023-03-18T17:48:54.664951
| 2023-02-03T15:16:23
| 2023-02-13T20:52:21
| 47,154,941
| 1,392
| 318
|
NOASSERTION
| 2023-09-01T03:24:19
| 2015-12-01T00:33:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,081
|
py
|
setup.py
|
#! /usr/bin/env python
from setuptools import setup, Command
from subprocess import check_call
from distutils.spawn import find_executable
import cpplint as cpplint
class Cmd(Command):
'''
Superclass for other commands to run via setup.py, declared in setup.cfg.
These commands will auto-install setup_requires in a temporary folder.
'''
user_options = [
('executable', 'e', 'The executable to use for the command')
]
def initialize_options(self):
self.executable = find_executable(self.executable)
def finalize_options(self):
pass
def execute(self, *k):
check_call((self.executable,) + k)
class Lint(Cmd):
'''run with python setup.py lint'''
description = 'Run linting of the code'
user_options = Cmd.user_options + [
('jobs', 'j', 'Use multiple processes to speed up the linting')
]
executable = 'pylint'
def run(self):
self.execute('cpplint.py')
# some pip versions bark on comments (e.g. on travis)
def read_without_comments(filename):
with open(filename) as f:
return [line for line in f.read().splitlines() if not len(line) == 0 and not line.startswith('#')]
test_required = read_without_comments('test-requirements')
setup(name='cpplint',
version=cpplint.__VERSION__,
py_modules=['cpplint'],
# generate platform specific start script
entry_points={
'console_scripts': [
'cpplint = cpplint:main'
]
},
install_requires=[],
url='https://github.com/cpplint/cpplint',
download_url='https://github.com/cpplint/cpplint',
keywords=['lint', 'python', 'c++'],
maintainer='cpplint Developers',
maintainer_email='see_github@nospam.com',
classifiers=['Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: C++',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Topic :: Software Development :: Quality Assurance',
'License :: Freely Distributable'],
description='Automated checker to ensure C++ files follow Google\'s style guide',
long_description=open('README.rst').read(),
license='BSD-3-Clause',
setup_requires=[
"pytest-runner==5.2"
],
tests_require=test_required,
# extras_require allow pip install .[dev]
extras_require={
'test': test_required,
'dev': read_without_comments('dev-requirements') + test_required
},
cmdclass={
'lint': Lint
})
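# --- Usage sketch (editor's addition, not part of the original file) ---
#   pip install .            # installs the `cpplint` console script declared in entry_points
#   python setup.py lint     # runs the Lint command registered in cmdclass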
|
8297fdfb43d094c990d29786eabc93bef965f819
|
4f3bf85beb89f27b025339ba62fa4162b1338623
|
/pinax/referrals/middleware.py
|
db73f6ac3ae2821a73fff7a4397af1d3cac0b1b4
|
[
"MIT"
] |
permissive
|
pinax/pinax-referrals
|
d13b41dd9b50bc905ef02ff3d2adf4dad2a3534e
|
6ad4640c71718ac31191ad2dc7d964f0af6930f0
|
refs/heads/master
| 2023-08-17T06:53:34.629111
| 2023-01-12T14:41:13
| 2023-01-12T14:41:13
| 2,397,676
| 151
| 69
|
MIT
| 2023-01-12T14:35:50
| 2011-09-16T06:21:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
middleware.py
|
from django.core.exceptions import ImproperlyConfigured
from .models import Referral
try:
from django.utils.deprecation import MiddlewareMixin as MiddlewareBaseClass
except ImportError:
MiddlewareBaseClass = object
class SessionJumpingMiddleware(MiddlewareBaseClass):
def process_request(self, request):
if not hasattr(request, "user"):
raise ImproperlyConfigured(
"django.contrib.auth.middleware.AuthenticationMiddleware middleware must come "
"before pinax.referrals.middleware.SessionJumpingMiddleware"
)
cookie = request.COOKIES.get("pinax-referral")
if request.user.is_authenticated and cookie:
code, session_key = cookie.split(":")
try:
referral = Referral.objects.get(code=code)
referral.link_responses_to_user(request.user, session_key)
except Referral.DoesNotExist:
pass
request.user._can_delete_pinax_referral_cookie = True
def process_response(self, request, response):
if hasattr(request, "user") and getattr(request.user, "_can_delete_pinax_referral_cookie", False):
response.delete_cookie("pinax-referral")
return response
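# --- Configuration sketch (editor's addition, not part of the original file) ---
# The middleware expects Django's AuthenticationMiddleware (and therefore
# SessionMiddleware) to run before it, e.g. in settings.py:
#
# MIDDLEWARE = [
#     "django.contrib.sessions.middleware.SessionMiddleware",
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "pinax.referrals.middleware.SessionJumpingMiddleware",
# ]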
|
76b57aae7d36e17db72b9e6ed35bc3ab1b97d044
|
d689b3caab44773d1c27327c7635748b7510eef0
|
/barbican/plugin/interface/secret_store.py
|
1eeb54565cdeaeeb7a4563ff9fb8f47717d4cc30
|
[
"Apache-2.0"
] |
permissive
|
openstack/barbican
|
e9d8cd8c7821fbd0cd42381358040540e010d885
|
c8e3dc14e6225f1d400131434e8afec0aa410ae7
|
refs/heads/master
| 2023-09-04T03:08:34.120188
| 2023-08-28T08:06:46
| 2023-08-28T08:06:46
| 20,115,712
| 189
| 93
|
Apache-2.0
| 2021-07-27T06:00:23
| 2014-05-23T22:15:27
|
Python
|
UTF-8
|
Python
| false
| false
| 25,738
|
py
|
secret_store.py
|
# Copyright (c) 2014 Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
from stevedore import named
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.util import multiple_backends
from barbican.plugin.util import utils as plugin_utils
_SECRET_STORE = None
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.secretstore.plugin'
DEFAULT_PLUGINS = ['store_crypto']
store_opt_group = cfg.OptGroup(name='secretstore',
title='Secret Store Plugin Options')
store_opts = [
cfg.StrOpt('namespace',
default=DEFAULT_PLUGIN_NAMESPACE,
help=u._('Extension namespace to search for plugins.')
),
cfg.MultiStrOpt('enabled_secretstore_plugins',
default=DEFAULT_PLUGINS,
help=u._('List of secret store plugins to load.')
),
cfg.BoolOpt('enable_multiple_secret_stores',
default=False,
help=u._('Flag to enable multiple secret store plugin'
' backend support. Default is False')
),
cfg.ListOpt('stores_lookup_suffix',
help=u._('List of suffix to use for looking up plugins which '
'are supported with multiple backend support.')
)
]
CONF.register_group(store_opt_group)
CONF.register_opts(store_opts, group=store_opt_group)
config.parse_args(CONF)
config.set_module_config("secretstore", CONF)
def list_opts():
yield store_opt_group, store_opts
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
"""Raised when no plugins are installed."""
client_message = u._("No plugin was found that could support your request")
status_code = 400
def __init__(self, plugin_name=None):
if plugin_name:
message = u._('Secret store plugin "{name}"'
' not found.').format(name=plugin_name)
else:
message = u._("Secret store plugin not found.")
super(SecretStorePluginNotFound, self).__init__(message)
class SecretStoreSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret store supported plugin is found."""
client_message = u._("Secret store supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for storing "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretStoreSupportedPluginNotFound, self).__init__(
message)
class SecretGenerateSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret generate supported plugin is found."""
client_message = u._("Secret generate supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for generating "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretGenerateSupportedPluginNotFound, self).__init__(
message)
class SecretContentTypeNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for payload content type is not available."""
status_code = 400
def __init__(self, content_type):
super(SecretContentTypeNotSupportedException, self).__init__(
u._("A Content-Type of '{content_type}' for secrets is "
"not supported").format(
content_type=content_type)
)
self.content_type = content_type
self.client_message = u._(
"content-type of '{content_type}' not supported").format(
content_type=content_type)
class SecretContentEncodingNotSupportedException(
exception.BarbicanHTTPException):
"""Raised when support for payload content encoding is not available."""
status_code = 400
def __init__(self, content_encoding):
super(SecretContentEncodingNotSupportedException, self).__init__(
u._("Secret Content-Encoding of '{content_encoding}' "
"not supported").format(
content_encoding=content_encoding)
)
self.content_encoding = content_encoding
self.client_message = u._(
"content-encoding of '{content_encoding}' not supported").format(
content_encoding=content_encoding)
class SecretNoPayloadProvidedException(exception.BarbicanException):
"""Raised when secret information is not provided."""
def __init__(self):
super(SecretNoPayloadProvidedException, self).__init__(
u._('No secret information provided to encrypt.')
)
class SecretContentEncodingMustBeBase64(exception.BarbicanHTTPException):
"""Raised when encoding must be base64."""
client_message = u._("Text-based binary secret payloads must "
"specify a content-encoding of 'base64'")
status_code = 400
def __init__(self):
super(SecretContentEncodingMustBeBase64, self).__init__(
u._("Encoding type must be 'base64' for text-based payloads.")
)
class SecretGeneralException(exception.BarbicanException):
"""Raised when a system fault has occurred."""
def __init__(self, reason=u._('Unknown')):
super(SecretGeneralException, self).__init__(
u._('Problem seen during crypto processing - '
'Reason: {reason}').format(reason=reason)
)
self.reason = reason
class SecretPayloadDecodingError(exception.BarbicanHTTPException):
"""Raised when payload could not be decoded."""
client_message = u._("Problem decoding payload")
status_code = 400
def __init__(self):
super(SecretPayloadDecodingError, self).__init__(
u._("Problem decoding payload")
)
class SecretAcceptNotSupportedException(exception.BarbicanHTTPException):
"""Raised when requested decrypted content-type is not available."""
client_message = u._("Wrong payload content-type")
status_code = 406
def __init__(self, accept):
super(SecretAcceptNotSupportedException, self).__init__(
u._("Secret Accept of '{accept}' not supported").format(
accept=accept)
)
self.accept = accept
class SecretNotFoundException(exception.BarbicanHTTPException):
"""Raised when secret information could not be located."""
client_message = u._("Secret not found.")
status_code = 404
def __init__(self):
super(SecretNotFoundException, self).__init__(
u._('No secret information found'))
class SecretAlgorithmNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for an algorithm is not available."""
client_message = u._("Requested algorithm is not supported")
status_code = 400
def __init__(self, algorithm):
super(SecretAlgorithmNotSupportedException, self).__init__(
u._("Secret algorithm of '{algorithm}' not supported").format(
algorithm=algorithm)
)
self.algorithm = algorithm
class GeneratePassphraseNotSupportedException(exception.BarbicanHTTPException):
"""Raised when generating keys encrypted by passphrase is not supported."""
client_message = (
u._("Generating keys encrypted with passphrases is not supported")
)
status_code = 400
def __init__(self):
super(GeneratePassphraseNotSupportedException, self).__init__(
self.client_message
)
class SecretStorePluginsNotConfigured(exception.BarbicanException):
"""Raised when there are no secret store plugins configured."""
def __init__(self):
super(SecretStorePluginsNotConfigured, self).__init__(
u._('No secret store plugins have been configured')
)
class StorePluginNotAvailableOrMisconfigured(exception.BarbicanException):
"""Raised when a plugin that was previously used can not be found."""
def __init__(self, plugin_name):
super(StorePluginNotAvailableOrMisconfigured, self).__init__(
u._("The requested Store Plugin {plugin_name} is not "
"currently available. This is probably a server "
"misconfiguration.").format(
plugin_name=plugin_name)
)
self.plugin_name = plugin_name
class SecretType(object):
"""Constant to define the symmetric key type.
Used by getSecret to retrieve a symmetric key.
"""
SYMMETRIC = "symmetric"
"""Constant to define the public key type. Used by getSecret to retrieve a
public key.
"""
PUBLIC = "public"
"""Constant to define the private key type. Used by getSecret to retrieve a
private key.
"""
PRIVATE = "private"
"""Constant to define the passphrase type. Used by getSecret to retrieve a
passphrase."""
PASSPHRASE = "passphrase" # nosec
"""Constant to define the certificate type. Used by getSecret to retrieve a
certificate."""
CERTIFICATE = "certificate"
"""Constant to define the opaque date type. Used by getSecret to retrieve
opaque data. Opaque data can be any kind of data. This data type signals to
Barbican to just store the information and do not worry about the format or
encoding. This is the default type if no type is specified by the user."""
OPAQUE = utils.SECRET_TYPE_OPAQUE
class KeyAlgorithm(object):
"""Constant for the Diffie Hellman algorithm."""
DIFFIE_HELLMAN = "diffie_hellman"
"""Constant for the DSA algorithm."""
DSA = "dsa"
"""Constant for the RSA algorithm."""
RSA = "rsa"
"""Constant for the Elliptic Curve algorithm."""
EC = "ec"
"""Constant for the HMACSHA1 algorithm."""
HMACSHA1 = "hmacsha1"
"""Constant for the HMACSHA256 algorithm."""
HMACSHA256 = "hmacsha256"
"""Constant for the HMACSHA384 algorithm."""
HMACSHA384 = "hmacsha384"
"""Constant for the HMACSHA512 algorithm."""
HMACSHA512 = "hmacsha512"
"""List of asymmetric algorithms"""
ASYMMETRIC_ALGORITHMS = [DIFFIE_HELLMAN, DSA, RSA, EC]
"""Constant for the AES algorithm."""
AES = "aes"
"""Constant for the DES algorithm."""
DES = "des"
"""Constant for the DESede (triple-DES) algorithm."""
DESEDE = "desede"
"""List of symmetric algorithms"""
SYMMETRIC_ALGORITHMS = [AES, DES, DESEDE, HMACSHA1,
HMACSHA256, HMACSHA384, HMACSHA512]
class KeySpec(object):
"""This object specifies the algorithm and bit length for a key."""
def __init__(self, alg=None, bit_length=None, mode=None, passphrase=None):
"""Creates a new KeySpec.
        :param alg: algorithm for the key
        :param bit_length: bit length of the key
        :param mode: algorithm mode for the key
        :param passphrase: passphrase for the private_key
"""
self.alg = alg
self.bit_length = bit_length
self.mode = mode # TODO(john-wood-w) Paul, is 'mode' required?
self.passphrase = passphrase
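# Example (editor's sketch, not part of the original file): a KeySpec asking
# for a 256-bit AES key using the constants defined above; the 'cbc' mode
# string is a placeholder value.
#
#   spec = KeySpec(alg=KeyAlgorithm.AES, bit_length=256, mode='cbc')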
class SecretDTO(object):
"""This object is a secret data transfer object (DTO).
This object encapsulates a key and attributes about the key. The attributes
include a KeySpec that contains the algorithm and bit length. The
attributes also include information on the encoding of the key.
"""
# TODO(john-wood-w) Remove 'content_type' once secret normalization work is
# completed.
def __init__(self, type, secret, key_spec, content_type,
transport_key=None):
"""Creates a new SecretDTO.
The secret is stored in the secret parameter. In the future this
DTO may include compression and key wrapping information.
:param type: SecretType for secret
:param secret: secret, as a base64-encoded string
:param key_spec: KeySpec key specifications
:param content_type: Content type of the secret, one of MIME
types such as 'text/plain' or 'application/octet-stream'
:param transport_key: presence of this parameter indicates that the
secret has been encrypted using a transport key. The transport
key is a base64 encoded x509 transport certificate.
"""
self.type = type or SecretType.OPAQUE
self.secret = secret
self.key_spec = key_spec
self.content_type = content_type
self.transport_key = transport_key
class AsymmetricKeyMetadataDTO(object):
"""This DTO encapsulates metadata(s) for asymmetric key components.
These components are private_key_meta, public_key_meta and passphrase_meta.
"""
def __init__(self, private_key_meta=None,
public_key_meta=None,
passphrase_meta=None):
"""Constructor for AsymmetricKeyMetadataDTO
:param private_key_meta: private key metadata
:param public_key_meta: public key metadata
:param passphrase_meta: passphrase key metadata
"""
self.private_key_meta = private_key_meta
self.public_key_meta = public_key_meta
self.passphrase_meta = passphrase_meta
class SecretStoreBase(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_plugin_name(self):
"""Gets user friendly plugin name.
This plugin name is expected to be read from config file.
There will be a default defined for plugin name which can be customized
in specific deployment if needed.
This name needs to be unique across a deployment.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_symmetric_key(self, key_spec):
"""Generate a new symmetric key and store it.
Generates a new symmetric key and stores it in the secret store.
A dictionary is returned that contains metadata about the newly created
symmetric key. The dictionary of metadata is stored by Barbican and
passed into other methods to aid the plugins. This can be useful for
plugins that generate a unique ID in the external data store and use it
to retrieve the key in the future. The returned dictionary may be empty
if the SecretStore does not require it.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: an optional dictionary containing metadata about the key
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_asymmetric_key(self, key_spec):
"""Generate a new asymmetric key pair and store it.
Generates a new asymmetric key pair and stores it in the secret
store. An object of type AsymmetricKeyMetadataDTO will be returned
containing attributes of metadata for newly created key pairs.
The metadata is stored by Barbican and passed into other methods
to aid the plugins. This can be useful for plugins that generate
a unique ID in the external data store and use it to retrieve the
key pairs in the future.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: An object of type AsymmetricKeyMetadataDTO containing
metadata about the key pair.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret(self, secret_dto):
"""Stores a key.
The SecretDTO contains the bytes of the secret and properties of the
secret. The SecretStore retrieves the secret bytes, stores them, and
returns a dictionary of metadata about the secret. This can be
useful for plugins that generate a unique ID in the external data
store and use it to retrieve the secret in the future. The returned
dictionary may be empty if the SecretStore does not require it.
:param secret_dto: SecretDTO for secret
:returns: an optional dictionary containing metadata about the secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def get_secret(self, secret_type, secret_metadata):
"""Retrieves a secret from the secret store.
Retrieves a secret from the secret store and returns a SecretDTO that
contains the secret.
The secret_metadata parameter is the metadata returned from one of the
generate or store methods. This data is used by the plugins to retrieve
the key.
The secret_type parameter may be useful for secret stores to know the
expected format of the secret. For instance if the type is
SecretDTO.PRIVATE then a PKCS8 structure is returned. This way secret
stores do not need to manage the secret type on their own.
:param secret_type: secret type
:param secret_metadata: secret metadata
:returns: SecretDTO that contains secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_supports(self, key_spec):
"""Returns a boolean indicating if the secret type is supported.
This checks if the algorithm and bit length are supported by the
generate methods. This is useful to call before calling
        generate_symmetric_key or generate_asymmetric_key to see if the key type
is supported before trying to generate it.
:param key_spec: KeySpec that contains details on the algorithm and bit
length
:returns: boolean indicating if the algorithm is supported
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def delete_secret(self, secret_metadata):
"""Deletes a secret from the secret store.
Deletes a secret from a secret store. It can no longer be referenced
after this call.
:param secret_metadata: secret_metadata
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret_supports(self, key_spec):
"""Returns a boolean indicating if the secret can be stored.
Checks if the secret store can store the secret, given the attributes
of the secret in the KeySpec. For example, some plugins may need to
know the attributes in order to store the secret, but other plugins
may be able to store the secret as a blob if no attributes are given.
:param key_spec: KeySpec for the secret
:returns: a boolean indicating if the secret can be stored
"""
raise NotImplementedError # pragma: no cover
def get_transport_key(self):
"""Gets a transport key.
Returns the current valid transport key associated with this plugin.
The transport key is expected to be a base64 encoded x509 certificate
containing a public key. Admins are responsible for deleting old keys
from the database using the DELETE method on the TransportKey resource.
By default, returns None. Plugins that support transport key
wrapping should override this method.
"""
return None
def is_transport_key_current(self, transport_key):
"""Determines if the provided transport key is the current valid key
Returns true if the transport key is the current valid transport key.
If the key is not valid, then barbican core will request a new
transport key from the plugin.
Returns False by default. Plugins that support transport key wrapping
should override this method.
"""
return False
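# Illustrative sketch, not part of the original module: a minimal in-memory
# implementation of the plugin contract described above. It assumes the
# abstract base class above is Barbican's ``SecretStoreBase`` and that the
# plugin-name method is ``get_plugin_name``. Real plugins persist secrets in
# an external store (KMIP, PKCS#11, ...); this only shows the required shape.
class _InMemorySecretStore(SecretStoreBase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._secrets = {}

    def get_plugin_name(self):
        return "in_memory_sketch"

    def generate_symmetric_key(self, key_spec):
        # A real plugin would create the key material here and return metadata.
        raise NotImplementedError("sketch only")

    def generate_asymmetric_key(self, key_spec):
        raise NotImplementedError("sketch only")

    def store_secret(self, secret_dto):
        secret_id = str(len(self._secrets))
        self._secrets[secret_id] = secret_dto
        # The returned dict is persisted by Barbican and passed back to
        # get_secret()/delete_secret() later.
        return {"secret_id": secret_id}

    def get_secret(self, secret_type, secret_metadata):
        return self._secrets[secret_metadata["secret_id"]]

    def generate_supports(self, key_spec):
        return False

    def delete_secret(self, secret_metadata):
        self._secrets.pop(secret_metadata["secret_id"], None)

    def store_secret_supports(self, key_spec):
        return True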
def _enforce_extensions_configured(plugin_related_function):
def _check_plugins_configured(self, *args, **kwargs):
if not self.extensions:
raise SecretStorePluginsNotConfigured()
return plugin_related_function(self, *args, **kwargs)
return _check_plugins_configured
class SecretStorePluginManager(named.NamedExtensionManager):
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
ss_conf = config.get_module_config('secretstore')
plugin_names = self._get_internal_plugin_names(ss_conf)
super(SecretStorePluginManager, self).__init__(
ss_conf.secretstore.namespace,
plugin_names,
invoke_on_load=False, # Defer creating plugins to utility below.
invoke_args=invoke_args,
invoke_kwds=invoke_kwargs,
name_order=True # extensions sorted as per order of plugin names
)
plugin_utils.instantiate_plugins(self, invoke_args, invoke_kwargs)
multiple_backends.sync_secret_stores(self)
@_enforce_extensions_configured
def get_plugin_store(self, key_spec, plugin_name=None,
transport_key_needed=False, project_id=None):
"""Gets a secret store plugin.
:param plugin_name: set to plugin_name to get specific plugin
:param key_spec: KeySpec of key that will be stored
:param transport_key_needed: set to True if a transport
key is required.
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=plugin_name)
if plugin_name is not None:
for plugin in active_plugins:
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise SecretStorePluginNotFound(plugin_name)
if not transport_key_needed:
for plugin in active_plugins:
if plugin.store_secret_supports(key_spec):
return plugin
else:
for plugin in active_plugins:
if (plugin.get_transport_key() is not None and
plugin.store_secret_supports(key_spec)):
return plugin
raise SecretStoreSupportedPluginNotFound(key_spec)
@_enforce_extensions_configured
def get_plugin_retrieve_delete(self, plugin_name):
"""Gets a secret retrieve/delete plugin.
If this function is being called, it is because we are trying to
retrieve or delete an already stored secret. Thus, the plugin name is
obtained from the plugin metadata that has already been stored
in the database. So, in this case, if this plugin is not available,
this might be due to a server misconfiguration.
:returns: SecretStoreBase plugin implementation
:raises: StorePluginNotAvailableOrMisconfigured: If the plugin wasn't
found it's because the plugin parameters were not properly
configured on the database side.
"""
for plugin in plugin_utils.get_active_plugins(self):
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise StorePluginNotAvailableOrMisconfigured(plugin_name)
@_enforce_extensions_configured
def get_plugin_generate(self, key_spec, project_id=None):
"""Gets a secret generate plugin.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=None)
for plugin in active_plugins:
if plugin.generate_supports(key_spec):
return plugin
raise SecretGenerateSupportedPluginNotFound(key_spec)
def _get_internal_plugin_names(self, secretstore_conf):
"""Gets plugin names used for loading via stevedore.
When multiple secret store support is enabled, secret store plugin
names are read via the updated configuration structure. If not enabled,
the MultiStr property in the 'secretstore' config section is read instead.
"""
# to cache default global secret store value on first use
self.global_default_store_dict = None
if utils.is_multiple_backends_enabled():
self.parsed_stores = multiple_backends.\
read_multiple_backends_config()
plugin_names = [store.store_plugin for store in self.parsed_stores
if store.store_plugin]
else:
plugin_names = secretstore_conf.secretstore.\
enabled_secretstore_plugins
return plugin_names
def get_manager():
global _SECRET_STORE
if not _SECRET_STORE:
_SECRET_STORE = SecretStorePluginManager()
return _SECRET_STORE
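# Illustrative usage sketch (not part of the original module): how a caller
# typically stores a secret through the manager above. ``key_spec`` and
# ``secret_dto`` are assumed to be the usual KeySpec / SecretDTO objects built
# elsewhere from the incoming request.
def _example_store_via_manager(key_spec, secret_dto):
    manager = get_manager()
    plugin = manager.get_plugin_store(key_spec=key_spec)
    # The returned metadata dict is what Barbican persists for later retrieval.
    return plugin.store_secret(secret_dto)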
|
477612ad52e88c71b5d3326e35ec87dfa0bfa5e1
|
4ac8b14961a608ed79a483da306e746021165c78
|
/effect/test_parallel_performers.py
|
05b2dbf6be968f8873da9db203e9526a568c1900
|
[
"MIT"
] |
permissive
|
python-effect/effect
|
ffae552b9c638b141c6ce728392bfe320e5e705f
|
cd21859ad2babebcbf12fa372aef34b9cd25a10e
|
refs/heads/master
| 2022-07-06T20:44:12.265058
| 2022-06-26T16:45:10
| 2022-06-26T16:45:10
| 20,022,094
| 289
| 17
| null | 2021-08-24T16:34:22
| 2014-05-21T13:26:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,488
|
py
|
test_parallel_performers.py
|
from functools import partial
import attr
from testtools.matchers import MatchesStructure, Equals
from . import Effect, raise_
from ._intents import Constant, Func, FirstError, parallel
from ._sync import sync_perform
from ._test_utils import MatchesReraisedExcInfo
@attr.s(hash=True)
class EquitableException(Exception):
message = attr.ib()
class ParallelPerformerTestsMixin(object):
"""Common tests for any performer of :obj:`effect.ParallelEffects`."""
def test_empty(self):
"""
When given an empty list of effects, ``perform_parallel_async`` returns
an empty list synchronously.
"""
result = sync_perform(self.dispatcher, parallel([]))
self.assertEqual(result, [])
def test_parallel(self):
"""
'parallel' results in a list of results of the given effects, in the
same order that they were passed to parallel.
"""
result = sync_perform(
self.dispatcher, parallel([Effect(Constant("a")), Effect(Constant("b"))])
)
self.assertEqual(result, ["a", "b"])
def test_error(self):
"""
When given an effect that results in an error,
``perform_parallel_async`` results in a ``FirstError``.
"""
expected_exc = EquitableException(message="foo")
reraise = partial(raise_, expected_exc)
try:
sync_perform(self.dispatcher, parallel([Effect(Func(reraise))]))
except FirstError as fe:
self.assertThat(
fe,
MatchesStructure(
index=Equals(0), exception=MatchesReraisedExcInfo(expected_exc)
),
)
else:
self.fail("sync_perform should have raised FirstError.")
def test_error_index(self):
"""
The ``index`` of a :obj:`FirstError` is the index of the effect that
failed in the list.
"""
expected_exc = EquitableException(message="foo")
reraise = partial(raise_, expected_exc)
try:
sync_perform(
self.dispatcher,
parallel(
[Effect(Constant(1)), Effect(Func(reraise)), Effect(Constant(2))]
),
)
except FirstError as fe:
self.assertThat(
fe,
MatchesStructure(
index=Equals(1), exception=MatchesReraisedExcInfo(expected_exc)
),
)
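# Illustrative usage sketch (not part of the original test module): performing
# effects in parallel outside of the test mixin. ``dispatcher`` is assumed to
# be any dispatcher able to perform ``ParallelEffects`` (the concrete
# dispatchers are supplied by subclasses of the mixin above).
def _example_parallel_usage(dispatcher):
    effects = [Effect(Constant("a")), Effect(Constant("b"))]
    # Results come back in the same order the effects were passed in.
    return sync_perform(dispatcher, parallel(effects))  # -> ["a", "b"]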
|
d2c4e2d51d378d2af222d741e0e97904bde992a1
|
01857ef455ea60eccaf03b5a9059ec83e9803c2e
|
/examples/trello_cards/main.py
|
aa7cf0493a4324c3790505a9355f35283cc83a51
|
[
"MIT"
] |
permissive
|
zauberzeug/nicegui
|
f08312cc1f393deca79e0e84a2506d3a35efff16
|
c61b1315f29d51e26cc1168207f5616b302f8df0
|
refs/heads/main
| 2023-08-18T18:09:30.937322
| 2023-08-18T15:04:00
| 2023-08-18T15:04:00
| 365,250,183
| 5,128
| 271
|
MIT
| 2023-09-14T01:50:56
| 2021-05-07T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
main.py
|
#!/usr/bin/env python3
from dataclasses import dataclass
import draganddrop as dnd
from nicegui import ui
@dataclass
class ToDo:
title: str
def handle_drop(todo: ToDo, location: str):
ui.notify(f'"{todo.title}" is now in {location}')
with ui.row():
with dnd.column('Next', on_drop=handle_drop):
dnd.card(ToDo('Simplify Layouting'))
dnd.card(ToDo('Provide Deployment'))
with dnd.column('Doing', on_drop=handle_drop):
dnd.card(ToDo('Improve Documentation'))
with dnd.column('Done', on_drop=handle_drop):
dnd.card(ToDo('Invent NiceGUI'))
dnd.card(ToDo('Test in own Projects'))
dnd.card(ToDo('Publish as Open Source'))
dnd.card(ToDo('Release Native-Mode'))
ui.run()
|
f044ecf6b044d4097a31d4c021d122e74e5e91f0
|
94a69d05880fdb03ad915a63f3575ff01e5df0e6
|
/isobar/io/netclock/__init__.py
|
12a6a4a93c43402da206af7fbfa33338b3c438c6
|
[
"MIT"
] |
permissive
|
ideoforms/isobar
|
06f2a5553b33e8185c6f9aed06224811589f7b70
|
12b03500ea882f17c3521700f7f74b0e36e4b335
|
refs/heads/master
| 2023-07-20T20:31:13.040686
| 2023-07-17T19:19:01
| 2023-07-17T19:19:01
| 2,155,202
| 320
| 53
|
MIT
| 2023-05-01T21:29:46
| 2011-08-04T15:12:03
|
Python
|
UTF-8
|
Python
| false
| false
| 81
|
py
|
__init__.py
|
from .receiver import NetworkClockReceiver
from .sender import NetworkClockSender
|
b45d03baf5c9eeacbab5807511c79339545d25af
|
8c4af05e0257661195c95b0b9e0873eeb6391dab
|
/packages/python-packages/doc-warden/warden/cmd_entry.py
|
681553f5f4e06830e9e487b4ab1bc1ab65775ef0
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-sdk-tools
|
6d171054800807fcbe7b8b878c5903a202d31faa
|
2dce521dedc3f5169007d4c481ae8ec077be4450
|
refs/heads/main
| 2023-09-01T00:00:32.662190
| 2023-08-31T22:21:44
| 2023-08-31T22:21:44
| 170,592,186
| 113
| 174
|
MIT
| 2023-09-14T21:53:41
| 2019-02-13T22:55:46
|
C#
|
UTF-8
|
Python
| false
| false
| 10,640
|
py
|
cmd_entry.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import print_function
from .enforce_target_file_presence import find_missing_target_files
from .enforce_readme_content import verify_readme_content
from .enforce_changelog_content import verify_changelog_content
from .index_packages import index_packages, render
from .WardenConfiguration import WardenConfiguration
from .PackageInfo import PackageInfo
import os
import logging
# CONFIGURATION. ENTRY POINT. EXECUTION.
def console_entry_point():
cfg = WardenConfiguration()
if cfg.verbose_output:
cfg.dump()
command_selector = {
'scan': all_operations,
'content': verify_content,
'presence': verify_presence,
'index': index
}
if cfg.command in command_selector:
command_selector.get(cfg.command)(cfg)
else:
print('Unrecognized command invocation {}.'.format(cfg.command))
exit(1)
# index the packages present in the repository
def index(config):
packages = index_packages(config)
render(config, packages)
if config.verbose_output:
print('Warden located the following packages: ')
for pkg in packages:
print(pkg.package_id)
# verify the content of readmes or changelogs
def verify_content(config):
packages = index_packages(config)
if config.target == 'all':
print('Only use the `all` switch when running the `scan` command')
exit(1)
if config.target == 'readme':
content_results, ignored_content_results = verify_readme_content(config)
output_readme_content_results(content_results, config)
exit_on_readme_content_issues(content_results, config)
if config.target == 'changelog':
missing_changelog, empty_release_notes = verify_changelog_content(config, packages)
output_changelog_content_results(missing_changelog, empty_release_notes)
exit_on_changelog_content_issues(missing_changelog, empty_release_notes, config)
# verify the presence of the target_files (Readme or Changelog)
def verify_presence(config):
if config.target == 'all':
print('Only use the `all` switch when running the `scan` command')
exit(1)
presence_results, ignored_presence_results = find_missing_target_files(config)
output_presence_results(presence_results, config)
exit_on_presence_issues(presence_results, config)
# Verify the case of present readme files
def verify_file_case_readme(pkg_list, config):
readmes_with_wrong_case = []
if pkg_list is None:
return readmes_with_wrong_case
for pkg in pkg_list:
if pkg.relative_readme_location:
if not os.path.splitext(os.path.basename(pkg.relative_readme_location))[0].isupper():
readmes_with_wrong_case.append(os.path.normpath(os.path.join(config.target_directory, pkg.relative_readme_location)))
return readmes_with_wrong_case
# Verify the case of present changelog files
def verify_file_case_changelog(pkg_list, config):
changelogs_with_wrong_case = []
for pkg in pkg_list:
if pkg.relative_changelog_location:
if not os.path.splitext(os.path.basename(pkg.relative_changelog_location))[0].isupper():
changelogs_with_wrong_case.append(os.path.normpath(os.path.join(config.target_directory, pkg.relative_changelog_location)))
return changelogs_with_wrong_case
# Exit if there are any presence issues
def exit_on_presence_issues(presence_results, config):
if len(presence_results) > 0:
conclusion_message()
exit(1)
# Exit if there are readme content issues
def exit_on_readme_content_issues(content_results, config):
if len(content_results) > 0:
conclusion_message()
exit(1)
# Exit if there are changelog content issues
def exit_on_changelog_content_issues(missing_changelog, empty_release_notes, config):
if len(missing_changelog) > 0:
conclusion_message()
exit(1)
if config.pipeline_stage == 'release' and len(empty_release_notes) > 0:
conclusion_message()
exit(1)
# print content results for readme
def output_readme_content_results(readmes_with_issues, config):
length = len(readmes_with_issues)
if length:
print('{0} {1} at least one missing required section.'.format(length, pluralize('readme has', 'readmes have', length)))
for readme_tuple in readmes_with_issues:
header = '{0} is missing {1} with {2}:'.format(
config.get_output_path(readme_tuple[0]),
pluralize('a header', 'headers', len(readme_tuple[1])),
pluralize('the pattern', 'patterns', len(readme_tuple[1]))
)
print(header)
for missing_pattern in readme_tuple[1]:
print(' * {0}'.format(format_header_path(missing_pattern)))
print()
def format_header_path(pattern):
return " -> ".join(pattern)
# print content results for changelog
def output_changelog_content_results(missing_changelog, empty_release_notes):
if len(missing_changelog):
print('{0} {1} missing entry{2} for the latest package version'.format(len(missing_changelog), pluralize('changelog has', 'changelogs have', len(missing_changelog)), pluralize('', 's', len(missing_changelog))))
print()
for changelog_tuple in missing_changelog:
print('MISSING CHANGELOG ENTRY: Latest Version {0} is missing in {1}. Add changelog for latest version'.format(changelog_tuple[1]['curr_pkg_version'], changelog_tuple[0]))
print()
if len(empty_release_notes):
print('{0} {1} empty release note for the latest package version'.format(len(empty_release_notes), pluralize('changelog has', 'changelogs have', len(empty_release_notes))))
print()
for changelog_tuple in empty_release_notes:
print('EMPTY CHANGELOG ENTRY: Latest Version {0} has no release notes in {1}. Consider adding release notes'.format(changelog_tuple[1]['curr_pkg_version'], changelog_tuple[0]))
print()
# print presence results
def output_presence_results(missing_target_file_paths, config):
if len(missing_target_file_paths):
print('{0} missing {1}{2} detected at:'.format(len(missing_target_file_paths), config.target_files[0], 's' if len(missing_target_file_paths) > 1 else ''))
for path in missing_target_file_paths:
print(config.get_output_path(path))
print()
# print case issues
def output_case_results(readmes_with_wrong_case, changelogs_with_wrong_case):
if readmes_with_wrong_case:
print('{0} Readme{1} are wrongly named:'.format(len(readmes_with_wrong_case), 's' if len(readmes_with_wrong_case) > 1 else ''))
for path in readmes_with_wrong_case:
print(path)
print()
if changelogs_with_wrong_case:
print('{0} Changelog{1} are wrongly named:'.format(len(changelogs_with_wrong_case), 's' if len(changelogs_with_wrong_case) > 1 else ''))
for path in changelogs_with_wrong_case:
print(path)
print()
# Run both presence and content verification on readmes
def all_operations_readme(config, packages):
config.target_files = ['readme.rst', 'readme.md'] if config.scan_language == 'python' else ['readme.md']
if config.verbose_output:
print('Starting Readme Presence Examination')
readme_presence_results, ignored_readme_presence_results = find_missing_target_files(config)
if config.verbose_output:
print('Done with Readme Presence Examination')
print('Starting Readme Content Examination')
readme_content_results, ignored_readme_content_results = verify_readme_content(config)
if config.verbose_output:
print('Done with Readme Content Examination')
readmes_with_wrong_case = verify_file_case_readme(packages, config)
output_presence_results(readme_presence_results, config)
output_readme_content_results(readme_content_results, config)
output_case_results(readmes_with_wrong_case, None)
if len(readme_content_results) > 0 or len(readme_presence_results) > 0 or len(readmes_with_wrong_case) > 0:
return 1
else:
return 0
# Run both presence and content verification on changelogs
def all_operations_changelog(config, packages):
config.target_files = ['history.rst', 'history.md'] if config.scan_language == 'python' else ['changelog.md']
if config.verbose_output:
print('Starting Changelog Presence Examination')
changelog_presence_results, ignored_changelog_presence_results = find_missing_target_files(config)
if config.verbose_output:
print('Done with Changelog Presence Examination')
print('Starting Changelog Content Examination')
missing_changelog, empty_release_notes = verify_changelog_content(config, packages)
if config.verbose_output:
print('Done with Changelog Content Examination')
changelogs_with_wrong_case = verify_file_case_changelog(packages, config)
output_presence_results(changelog_presence_results, config)
output_changelog_content_results(missing_changelog, empty_release_notes)
output_case_results(None, changelogs_with_wrong_case)
if len(missing_changelog) > 0 or len(changelog_presence_results) > 0 or len(changelogs_with_wrong_case):
return 1
elif len(empty_release_notes) > 0 and config.pipeline_stage == 'release':
return 1
else:
return 0
# execute both presence and content verification
def all_operations(config):
packages = index_packages(config)
result = 0
if config.target == 'default':
result = all_operations_readme(config, None)
elif config.target == 'readme':
result = all_operations_readme(config, packages)
elif config.target == 'changelog':
result = all_operations_changelog(config, packages)
elif config.target == 'all':
readme_result = all_operations_readme(config, packages)
changelog_result = all_operations_changelog(config, packages)
result = readme_result or changelog_result
if result == 1:
conclusion_message()
exit(1)
# return the plural form of the string given a count > 1
def pluralize(string, plural_string, count):
return plural_string if count > 1 else string
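# Illustrative note (not part of the original module): pluralize('readme has',
# 'readmes have', 3) returns 'readmes have', while a count of 1 returns the
# singular form unchanged.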
# final output. Could get longer or pull from a template in the future.
def conclusion_message():
print('For a rundown on what you need to do to resolve this breaking issue ASAP, check out aka.ms/azure-sdk-analyze-failed')
|
e9f44eebb04a3bb7e153204db04e6fd54e260088
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-enterpriseknowledgegraph/google/cloud/enterpriseknowledgegraph/__init__.py
|
4323129d052fb3e97c9ee6cd2799b8c5aa65d25b
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,766
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.enterpriseknowledgegraph import gapic_version as package_version
__version__ = package_version.__version__
from google.cloud.enterpriseknowledgegraph_v1.services.enterprise_knowledge_graph_service.async_client import (
EnterpriseKnowledgeGraphServiceAsyncClient,
)
from google.cloud.enterpriseknowledgegraph_v1.services.enterprise_knowledge_graph_service.client import (
EnterpriseKnowledgeGraphServiceClient,
)
from google.cloud.enterpriseknowledgegraph_v1.types.job_state import JobState
from google.cloud.enterpriseknowledgegraph_v1.types.operation_metadata import (
CommonOperationMetadata,
)
from google.cloud.enterpriseknowledgegraph_v1.types.service import (
AffinityClusteringConfig,
BigQueryInputConfig,
CancelEntityReconciliationJobRequest,
ConnectedComponentsConfig,
CreateEntityReconciliationJobRequest,
DeleteEntityReconciliationJobRequest,
DeleteOperationMetadata,
EntityReconciliationJob,
GetEntityReconciliationJobRequest,
InputConfig,
ListEntityReconciliationJobsRequest,
ListEntityReconciliationJobsResponse,
LookupPublicKgRequest,
LookupPublicKgResponse,
LookupRequest,
LookupResponse,
OutputConfig,
ReconConfig,
SearchPublicKgRequest,
SearchPublicKgResponse,
SearchRequest,
SearchResponse,
)
__all__ = (
"EnterpriseKnowledgeGraphServiceClient",
"EnterpriseKnowledgeGraphServiceAsyncClient",
"JobState",
"CommonOperationMetadata",
"AffinityClusteringConfig",
"BigQueryInputConfig",
"CancelEntityReconciliationJobRequest",
"ConnectedComponentsConfig",
"CreateEntityReconciliationJobRequest",
"DeleteEntityReconciliationJobRequest",
"DeleteOperationMetadata",
"EntityReconciliationJob",
"GetEntityReconciliationJobRequest",
"InputConfig",
"ListEntityReconciliationJobsRequest",
"ListEntityReconciliationJobsResponse",
"LookupPublicKgRequest",
"LookupPublicKgResponse",
"LookupRequest",
"LookupResponse",
"OutputConfig",
"ReconConfig",
"SearchPublicKgRequest",
"SearchPublicKgResponse",
"SearchRequest",
"SearchResponse",
)
|
08fdbb3fbd34f31ffab470d9857ff33b0551b718
|
37e88c82b29bb92819ee7f82d6d24d778f78ab99
|
/run.py
|
407e561634d5bfad5b3d5aaf845f1f7fc03d0393
|
[
"Apache-2.0"
] |
permissive
|
phage-nz/ph0neutria
|
134fc27b074618bd4b6a7685235ec5dd525eebbb
|
865aae37d8503d3f580f6762aa67f65958355ba7
|
refs/heads/master
| 2022-07-04T15:14:29.136172
| 2020-04-24T23:37:23
| 2020-04-24T23:37:23
| 70,286,606
| 281
| 65
|
Apache-2.0
| 2020-04-12T06:46:57
| 2016-10-07T22:12:20
|
Python
|
UTF-8
|
Python
| false
| false
| 73
|
py
|
run.py
|
#!/usr/bin/python3
from core.core_utils import start_core
start_core()
|
ba5e2b3f2ea512ec970212e5bd350a295075a456
|
cfb5afe81701fa29c45c3fd759b0ebd46e2eb076
|
/onmt/modules/embeddings.py
|
87eddd39ec04add489564d275e486a5c468599bb
|
[
"MIT"
] |
permissive
|
OpenNMT/OpenNMT-py
|
00b533d451d6a2b907a41f531898c3d70830f16e
|
2b13ed1a0f50d4555d2e1d8729a2de42940be5e4
|
refs/heads/master
| 2023-09-03T17:44:52.635771
| 2023-08-29T13:18:14
| 2023-08-29T13:18:14
| 82,841,862
| 6,680
| 2,749
|
MIT
| 2023-09-13T14:00:30
| 2017-02-22T19:01:50
|
Python
|
UTF-8
|
Python
| false
| false
| 15,550
|
py
|
embeddings.py
|
""" Embeddings module """
import math
import warnings
import torch
import torch.nn as nn
from onmt.modules.util_class import Elementwise
from onmt.utils.logging import logger
class SequenceTooLongError(Exception):
pass
class PositionalEncoding(nn.Module):
"""Sinusoidal positional encoding for non-recurrent neural networks.
Implementation based on "Attention Is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`
Args:
dim (int): embedding size
"""
def __init__(self, dim, enc_type, max_len=5000):
if dim % 2 != 0:
raise ValueError(
"Cannot use sin/cos positional encoding with "
"odd dim (got dim={:d})".format(dim)
)
if enc_type == "SinusoidalInterleaved":
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(
(
torch.arange(0, dim, 2, dtype=torch.float)
* -(math.log(10000.0) / dim)
)
)
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
elif enc_type == "SinusoidalConcat":
half_dim = dim // 2
pe = math.log(10000) / (half_dim - 1)
pe = torch.exp(torch.arange(half_dim, dtype=torch.float) * -pe)
pe = torch.arange(max_len, dtype=torch.float).unsqueeze(1) * pe.unsqueeze(0)
pe = torch.cat([torch.sin(pe), torch.cos(pe)], dim=1).view(max_len, -1)
else:
raise ValueError(
"Choice of Position encoding is SinusoidalInterleaved or"
" SinusoidalConcat."
)
pe = pe.unsqueeze(1) # we keep pe (len x batch x dim) for back comp
super(PositionalEncoding, self).__init__()
self.register_buffer("pe", pe)
self.dim = dim
def forward(self, emb, step=None):
"""Embed inputs.
Args:
emb (FloatTensor): Sequence of word vectors
``(batch_size, seq_len, self.dim)``
step (int or NoneType): If stepwise (``seq_len = 1``), use
the encoding for this position.
"""
pe = self.pe.transpose(0, 1) # (batch x len x dim)
emb = emb * math.sqrt(self.dim)
step = step or 0
if pe.size(1) < step + emb.size(1):
raise SequenceTooLongError(
f"Sequence is {emb.size(1) + step} but PositionalEncoding is"
f" limited to {self.pe.size(1)}. See max_len argument."
)
emb = emb + pe[:, step : emb.size(1) + step, :]
return emb
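# Illustrative usage sketch (not part of the original module): shapes follow
# the forward() docstring above, i.e. the input is (batch_size, seq_len, dim).
def _example_positional_encoding():
    pe = PositionalEncoding(dim=16, enc_type="SinusoidalInterleaved")
    dummy = torch.zeros(2, 5, 16)
    # Output keeps the same shape; sinusoidal offsets for positions 0..4 are added.
    return pe(dummy)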
class Embeddings(nn.Module):
"""Words embeddings for encoder/decoder.
Additionally includes ability to add sparse input features
based on "Linguistic Input Features Improve Neural Machine Translation"
:cite:`sennrich2016linguistic`.
.. mermaid::
graph LR
A[Input]
C[Feature 1 Lookup]
A-->B[Word Lookup]
A-->C
A-->D[Feature N Lookup]
B-->E[MLP/Concat]
C-->E
D-->E
E-->F[Output]
Args:
word_vec_size (int): size of the dictionary of embeddings.
word_vocab_size (int): size of dictionary of embeddings for words.
word_padding_idx (int): padding index for words in the embeddings.
position_encoding (bool): see :class:`~onmt.modules.PositionalEncoding`
feat_merge (string): merge action for the features embeddings:
concat, sum or mlp.
feat_vec_exponent (float): when using `-feat_merge concat`, feature
embedding size is N^feat_vec_exponent, where N is the
number of values the feature takes.
feat_vec_size (int): embedding dimension for features when using
`-feat_merge mlp`
feat_padding_idx (List[int]): padding index for a list of features
in the embeddings.
feat_vocab_sizes (List[int], optional): list of size of dictionary
of embeddings for each feature.
dropout (float): dropout probability.
sparse (bool): sparse embeddings, default False
freeze_word_vecs (bool): freeze weights of word vectors.
"""
def __init__(
self,
word_vec_size,
word_vocab_size,
word_padding_idx,
position_encoding=False,
position_encoding_type="SinusoidalInterleaved",
feat_merge="concat",
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=0,
sparse=False,
freeze_word_vecs=False,
):
self._validate_args(
feat_merge,
feat_vocab_sizes,
feat_vec_exponent,
feat_vec_size,
feat_padding_idx,
)
if feat_padding_idx is None:
feat_padding_idx = []
self.word_padding_idx = word_padding_idx
self.word_vec_size = word_vec_size
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == "sum":
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab**feat_vec_exponent) for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [
nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)
for vocab, dim, pad in emb_params
]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = sum(emb_dims) if feat_merge == "concat" else word_vec_size
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module("emb_luts", emb_luts)
if feat_merge == "mlp" and len(feat_vocab_sizes) > 0:
in_dim = sum(emb_dims)
mlp = nn.Sequential(nn.Linear(in_dim, word_vec_size), nn.ReLU())
self.make_embedding.add_module("mlp", mlp)
self.position_encoding = position_encoding
self.dropout = nn.Dropout(p=dropout)
if self.position_encoding:
pe = PositionalEncoding(self.embedding_size, position_encoding_type)
self.make_embedding.add_module("pe", pe)
if freeze_word_vecs:
self.word_lut.weight.requires_grad = False
def _validate_args(
self,
feat_merge,
feat_vocab_sizes,
feat_vec_exponent,
feat_vec_size,
feat_padding_idx,
):
if feat_merge == "sum":
# features must use word_vec_size
if feat_vec_exponent != 0.7:
warnings.warn(
"Merging with sum, but got non-default "
"feat_vec_exponent. It will be unused."
)
if feat_vec_size != -1:
warnings.warn(
"Merging with sum, but got non-default "
"feat_vec_size. It will be unused."
)
elif feat_vec_size > 0:
# features will use feat_vec_size
if feat_vec_exponent != -1:
warnings.warn(
"Not merging with sum and positive "
"feat_vec_size, but got non-default "
"feat_vec_exponent. It will be unused."
)
else:
if feat_vec_exponent <= 0:
raise ValueError(
"Using feat_vec_exponent to determine "
"feature vec size, but got feat_vec_exponent "
"less than or equal to 0."
)
n_feats = len(feat_vocab_sizes)
if n_feats != len(feat_padding_idx):
raise ValueError(
"Got unequal number of feat_vocab_sizes and "
"feat_padding_idx ({:d} != {:d})".format(n_feats, len(feat_padding_idx))
)
@property
def word_lut(self):
"""Word look-up table."""
return self.make_embedding[0][0]
@property
def emb_luts(self):
"""Embedding look-up table."""
return self.make_embedding[0]
def load_pretrained_vectors(self, emb_file):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data.copy_(pretrained[:, : self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
def forward(self, source, step=None):
"""Computes the embeddings for words and features.
Args:
source (LongTensor): index tensor ``(batch, len, nfeat)``
Returns:
FloatTensor: Word embeddings ``(batch, len, embedding_size)``
"""
if self.position_encoding:
for i, module in enumerate(self.make_embedding._modules.values()):
if i == len(self.make_embedding._modules.values()) - 1:
source = module(source, step=step)
else:
source = module(source)
else:
source = self.make_embedding(source)
return self.dropout(source)
def update_dropout(self, dropout):
self.dropout.p = dropout
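# Illustrative usage sketch (not part of the original module): a featureless
# word-embedding lookup. Per the forward() docstring, the index tensor has
# shape (batch, len, nfeat), with nfeat == 1 when no extra features are used.
def _example_embeddings_lookup():
    emb = Embeddings(word_vec_size=8, word_vocab_size=100, word_padding_idx=0)
    tokens = torch.zeros(2, 5, 1, dtype=torch.long)
    return emb(tokens)  # FloatTensor of shape (2, 5, 8)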
# Some utility functions for pretrained embeddings
def read_embeddings(path, skip_lines=0, filter_set=None):
"""
Read an embeddings file in the glove format.
"""
embs = dict()
total_vectors_in_file = 0
with open(path, "rb") as f:
for i, line in enumerate(f):
if i < skip_lines:
continue
if not line:
break
if len(line) == 0:
# is this reachable?
continue
l_split = line.decode("utf8").strip().split(" ")
if len(l_split) == 2:
continue
total_vectors_in_file += 1
if filter_set is not None and l_split[0] not in filter_set:
continue
embs[l_split[0]] = [float(em) for em in l_split[1:]]
return embs, total_vectors_in_file
def calc_vocab_load_stats(vocab, loaded_embed_dict):
matching_count = len(set(vocab.ids_to_tokens) & set(loaded_embed_dict.keys()))
missing_count = len(vocab) - matching_count
percent_matching = matching_count / len(vocab) * 100
return matching_count, missing_count, percent_matching
def convert_to_torch_tensor(word_to_float_list_dict, vocab):
dim = len(next(iter(word_to_float_list_dict.values())))
tensor = torch.zeros((len(vocab), dim))
for word, values in word_to_float_list_dict.items():
tensor[vocab.tokens_to_ids[word]] = torch.Tensor(values)
return tensor
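# Illustrative usage sketch (not part of the original module): turning a
# GloVe-style text file into a tensor for a tiny, hypothetical vocab object
# exposing ``ids_to_tokens`` / ``tokens_to_ids`` like the real vocabs do. It
# assumes the file actually contains vectors for the listed tokens.
def _example_embedding_file_to_tensor(path):
    class _TinyVocab:
        ids_to_tokens = ["the", "cat"]
        tokens_to_ids = {"the": 0, "cat": 1}

        def __len__(self):
            return len(self.ids_to_tokens)

    vocab = _TinyVocab()
    embs, _total = read_embeddings(path, filter_set=set(vocab.ids_to_tokens))
    return convert_to_torch_tensor(embs, vocab)  # shape (len(vocab), dim)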
def prepare_pretrained_embeddings(opt, vocabs):
if all(
[
opt.both_embeddings is None,
opt.src_embeddings is None,
opt.tgt_embeddings is None,
]
):
return
assert (
opt.save_data
), "-save_data is required when using \
pretrained embeddings."
vocs = []
for side in ["src", "tgt"]:
vocab = vocabs[side]
vocs.append(vocab)
enc_vocab, dec_vocab = vocs
skip_lines = 1 if opt.embeddings_type == "word2vec" else 0
if opt.both_embeddings is not None:
set_of_src_and_tgt_vocab = set(enc_vocab.ids_to_tokens) | set(
dec_vocab.ids_to_tokens
)
logger.info(
"Reading encoder and decoder embeddings from {}".format(opt.both_embeddings)
)
src_vectors, total_vec_count = read_embeddings(
opt.both_embeddings, skip_lines, set_of_src_and_tgt_vocab
)
tgt_vectors = src_vectors
logger.info("\tFound {} total vectors in file".format(total_vec_count))
else:
if opt.src_embeddings is not None:
logger.info("Reading encoder embeddings from {}".format(opt.src_embeddings))
src_vectors, total_vec_count = read_embeddings(
opt.src_embeddings, skip_lines, filter_set=set(enc_vocab.ids_to_tokens)
)
logger.info("\tFound {} total vectors in file.".format(total_vec_count))
else:
src_vectors = None
if opt.tgt_embeddings is not None:
logger.info("Reading decoder embeddings from {}".format(opt.tgt_embeddings))
tgt_vectors, total_vec_count = read_embeddings(
opt.tgt_embeddings, skip_lines, filter_set=set(dec_vocab.ids_to_tokens)
)
logger.info("\tFound {} total vectors in file".format(total_vec_count))
else:
tgt_vectors = None
logger.info("After filtering to vectors in vocab:")
if opt.src_embeddings is not None or opt.both_embeddings is not None:
logger.info(
"\t* enc: %d match, %d missing, (%.2f%%)"
% calc_vocab_load_stats(enc_vocab, src_vectors)
)
if opt.tgt_embeddings is not None or opt.both_embeddings is not None:
logger.info(
"\t* dec: %d match, %d missing, (%.2f%%)"
% calc_vocab_load_stats(dec_vocab, tgt_vectors)
)
# Write to file
enc_output_file = opt.save_data + ".enc_embeddings.pt"
dec_output_file = opt.save_data + ".dec_embeddings.pt"
if opt.src_embeddings is not None or opt.both_embeddings is not None:
logger.info("\nSaving encoder embeddings as:\n\t* enc: %s" % enc_output_file)
torch.save(convert_to_torch_tensor(src_vectors, enc_vocab), enc_output_file)
# set the opt in place
opt.pre_word_vecs_enc = enc_output_file
if opt.tgt_embeddings is not None or opt.both_embeddings is not None:
logger.info("\nSaving decoder embeddings as:\n\t* dec: %s" % dec_output_file)
torch.save(convert_to_torch_tensor(tgt_vectors, dec_vocab), dec_output_file)
# set the opt in place
opt.pre_word_vecs_dec = dec_output_file
|
d903537f7113d003d157e3d626c1ec6c7ed82bf8
|
8e90a7759ec7143427823547e0fbff58e0343aaa
|
/docker_sdk_api/domain/exceptions/api_request_exception.py
|
44b26547bdc1570df7804ac0e9e400457d8f1700
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
BMW-InnovationLab/BMW-TensorFlow-Training-GUI
|
646a6f86f26887e94351b4c572b7fe7f0842f75c
|
06531dae14365986c86baf735fd149317f4bb67a
|
refs/heads/master
| 2023-07-20T01:48:27.299962
| 2023-07-12T15:22:22
| 2023-07-12T15:22:22
| 227,429,492
| 1,030
| 198
|
Apache-2.0
| 2023-05-22T17:40:23
| 2019-12-11T18:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
api_request_exception.py
|
from domain.exceptions.application_error import ApplicationError
class ApiRequestException(ApplicationError):
def __init__(self, additional_message: str = '', container_name: str = ''):
super().__init__('Api Request failed for container {}: '.format(container_name), additional_message)
|
daa67b33a717c3851c69f677978620a9b38e42be
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/tools/ci_build/github/js/validate-npm-packages.py
|
b009330764973da1d2a3825c7db54526f10ca775
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 7,821
|
py
|
validate-npm-packages.py
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
# This is a script to validate NPM packages.
# If the package version, publish tag, and filename do not fulfill the requirements, an error will be raised.
# arg.1 - Folder of extracted artifact "onnxruntime-node" for node.js binding
ort_node_pkg_dir = sys.argv[1]
# arg.2 - Folder of extracted artifact "onnxruntime-web" for web
ort_web_pkg_dir = sys.argv[2]
# arg.3 - Folder of extracted artifact "onnxruntime-react-native" for react native
ort_react_native_pkg_dir = sys.argv[3]
# arg.4 - source branch, e.g. "refs/heads/master"
source_branch = sys.argv[4]
# arg.5 - NPM tag, e.g. "", "dev", "latest", "rc"
tag = sys.argv[5]
# print out command line parameters
print("====== argv ======")
print("ort_node_pkg_dir:", ort_node_pkg_dir)
print("ort_web_pkg_dir:", ort_web_pkg_dir)
print("ort_react_native_pkg_dir:", ort_react_native_pkg_dir)
print("source_branch:", source_branch)
print("tag:", tag)
# check release flags from environment variables
RELEASE_NODE = os.environ.get("RELEASE_NODE", "") == "1"
RELEASE_WEB = os.environ.get("RELEASE_WEB", "") == "1"
RELEASE_REACT_NATIVE = os.environ.get("RELEASE_REACT_NATIVE", "") == "1"
# print out release flags
print("====== flags ======")
print("RELEASE_NODE:", RELEASE_NODE)
print("RELEASE_WEB:", RELEASE_WEB)
print("RELEASE_REACT_NATIVE:", RELEASE_REACT_NATIVE)
if not RELEASE_NODE and not RELEASE_WEB and not RELEASE_REACT_NATIVE:
raise Exception("not releasing any package. exiting.")
count_ort_node_common_tgz = 0
count_ort_node_tgz = 0
ort_node_common_ver = ""
ort_node_ver = ""
for file in os.listdir(ort_node_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_node_common_ver = file[19:-4]
count_ort_node_common_tgz += 1
if file.startswith("onnxruntime-node-") and file.endswith(".tgz"):
ort_node_ver = file[17:-4]
count_ort_node_tgz += 1
count_ort_web_common_tgz = 0
count_ort_web_tgz = 0
ort_web_common_ver = ""
ort_web_ver = ""
for file in os.listdir(ort_web_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_web_common_ver = file[19:-4]
count_ort_web_common_tgz += 1
if file.startswith("onnxruntime-web-") and file.endswith(".tgz"):
ort_web_ver = file[16:-4]
count_ort_web_tgz += 1
count_ort_react_native_common_tgz = 0
count_ort_react_native_tgz = 0
ort_react_native_common_ver = ""
ort_react_native_ver = ""
for file in os.listdir(ort_react_native_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_react_native_common_ver = file[19:-4]
count_ort_react_native_common_tgz += 1
if file.startswith("onnxruntime-react-native-") and file.endswith(".tgz"):
ort_react_native_ver = file[25:-4]
count_ort_react_native_tgz += 1
if count_ort_node_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-node folder")
if count_ort_web_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-web folder")
if count_ort_react_native_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-react-native folder")
if RELEASE_NODE and RELEASE_WEB and count_ort_node_common_tgz != count_ort_web_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-node/onnxruntime-web)")
if RELEASE_NODE and RELEASE_REACT_NATIVE and count_ort_node_common_tgz != count_ort_react_native_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-node/onnxruntime-react-native)")
if RELEASE_WEB and RELEASE_REACT_NATIVE and count_ort_web_common_tgz != count_ort_react_native_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-web/onnxruntime-react-native)")
if RELEASE_NODE and RELEASE_WEB and ort_node_common_ver != ort_web_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-node/onnxruntime-web)")
if RELEASE_NODE and RELEASE_REACT_NATIVE and ort_node_common_ver != ort_react_native_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-node/onnxruntime-react-native)")
if RELEASE_WEB and RELEASE_REACT_NATIVE and ort_web_common_ver != ort_react_native_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-web/onnxruntime-react-native)")
ort_common_ver = (
ort_node_common_ver if RELEASE_NODE else (ort_web_common_ver if RELEASE_WEB else ort_react_native_common_ver)
)
ort_common_from = "" if not ort_common_ver else ("node" if RELEASE_NODE else ("web" if RELEASE_WEB else "react-native"))
print("====== output environment variables ======")
print(f"##vso[task.setvariable variable=ORT_COMMON_FROM]{ort_common_from}")
if tag == "latest" or tag == "" or tag == "rc":
if not RELEASE_NODE or not RELEASE_WEB or not RELEASE_REACT_NATIVE:
raise Exception("@latest or @rc build must release all packages (node, web, react-native)")
if count_ort_node_common_tgz != 1:
raise Exception("expect one package file for onnxruntime-common for release build")
if count_ort_node_tgz != 1:
raise Exception("expect one package file for onnxruntime-node")
if count_ort_web_tgz != 1:
raise Exception("expect one package file for onnxruntime-web")
if count_ort_react_native_tgz != 1:
raise Exception("expect one package file for onnxruntime-react-native")
if RELEASE_NODE and RELEASE_WEB and ort_node_ver != ort_web_ver:
raise Exception("version number is different for onnxruntime-node and onnxruntime-web")
if RELEASE_NODE and RELEASE_REACT_NATIVE and ort_node_ver != ort_react_native_ver:
raise Exception("version number is different for onnxruntime-node and onnxruntime-react-native")
if RELEASE_WEB and RELEASE_REACT_NATIVE and ort_web_ver != ort_react_native_ver:
raise Exception("version number is different for onnxruntime-web and onnxruntime-react-native")
print("====== validated versions ======")
print(f"source_branch={source_branch}")
print(f"tag={tag}")
print(f"ort_common_ver={ort_common_ver}")
print(f"ort_node_ver={ort_node_ver}")
print(f"ort_web_ver={ort_web_ver}")
print(f"ort_react_native_ver={ort_react_native_ver}")
if tag == "latest" or tag == "":
print("Publishing @latest ...")
if not source_branch.startswith("refs/heads/rel-"):
raise Exception('@latest build must publish from source branch "refs/heads/rel-*"')
if (
"-" in ort_common_ver.replace("-rev", "")
or "-" in ort_web_ver.replace("-rev", "")
or "-" in ort_react_native_ver.replace("-rev", "")
):
raise Exception('@latest build version cannot contain "-" (unless -rev)')
if tag == "rc":
print("Publishing @rc ...")
if not source_branch.startswith("refs/heads/rel-"):
raise Exception('@rc build must publish from source branch "refs/heads/rel-*"')
if "-rc" not in ort_web_ver:
raise Exception('@rc build version should contain "-rc"')
if "-rc" not in ort_react_native_ver:
raise Exception('@rc build version should contain "-rc"')
if (
"-" not in ort_common_ver.replace("-rev", "")
and "-" not in ort_web_ver.replace("-rev", "")
and "-" not in ort_react_native_ver.replace("-rev", "")
and "+" not in ort_common_ver.replace("-rev", "")
and "+" not in ort_web_ver.replace("-rev", "")
and "+" not in ort_react_native_ver.replace("-rev", "")
):
if tag != "latest" and tag != "":
raise Exception("default version without decorator can only be published in @latest tag")
|
8559d1c502471ff5db4917f88a75067829c8e5aa
|
31cf77b4c0342c6148b35ae2613d5e2501d5e755
|
/src/encoded/tests/fixtures/schemas/computational_model.py
|
799af5e6a968f5e5c944178f3dbfaf4ccba8233b
|
[
"MIT"
] |
permissive
|
ENCODE-DCC/encoded
|
096de8a6d60c959a783cc9517f1d60bd6c21b71f
|
80e05610c79b46d0890228555bb03e436b2fef11
|
refs/heads/dev
| 2023-08-08T15:45:07.493187
| 2023-08-03T20:01:24
| 2023-08-03T20:01:24
| 7,045,549
| 110
| 69
|
MIT
| 2023-09-12T23:59:45
| 2012-12-07T00:52:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
computational_model.py
|
import pytest
@pytest.fixture
def computational_model(testapp, lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'computational_model_type': 'imputation'
}
@pytest.fixture
def computational_model_unique_software(computational_model, software_version1, software_version2):
item = computational_model.copy()
item.update(
{
'software_used': [
software_version1['@id'],
software_version2['@id']
],
}
)
return item
@pytest.fixture
def computational_model_non_unique_software(computational_model, software_version1, software_version2):
item = computational_model.copy()
item.update(
{
'software_used': [
software_version1['@id'],
software_version2['@id'],
software_version2['@id']
],
}
)
return item
@pytest.fixture
def computational_model_1(testapp, lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'computational_model_type': 'imputation',
'schema_version': '1',
'internal_tags': ['ENCYCLOPEDIAv3', 'ENCYCLOPEDIAv4', 'ENCYCLOPEDIAv5', 'ENCYCLOPEDIAv6']
}
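# Illustrative sketch (not part of the original fixture module): a test that
# consumes one of the fixtures above via webtest. The collection path used
# here is an assumption for illustration only.
# def test_computational_model_post(testapp, computational_model):
#     testapp.post_json('/computational_model', computational_model, status=201)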
|
ee36a564ac10183ef7f65a1553cbad114db4841b
|
a38bf459ae380f67e0de22f7106a8df4385a7076
|
/tests/unit/common_types.py
|
a5d4a88d692222c5932f82665134e88c598a7017
|
[
"Apache-2.0"
] |
permissive
|
googleapis/gapic-generator-python
|
73ce9d52f6f5bb2652d49b237b24263d6637b1da
|
4eee26181e8db9fb5144eef5a76f178c1594e48a
|
refs/heads/main
| 2023-09-04T11:12:14.728757
| 2023-09-02T10:34:44
| 2023-09-02T10:34:44
| 129,809,857
| 116
| 65
|
Apache-2.0
| 2023-09-12T18:57:01
| 2018-04-16T21:47:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,256
|
py
|
common_types.py
|
# Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import itertools
from collections import namedtuple
from typing import (Any, Dict, Iterable, Optional)
from google.protobuf import descriptor_pb2
from gapic.schema import metadata
from gapic.schema import wrappers
# Injected dummy test types
@dataclasses.dataclass(frozen=True)
class DummyMethod:
name: bool = False
input: bool = False
output: bool = False
lro: bool = False
void: bool = False
paged_result_field: bool = False
client_streaming: bool = False
server_streaming: bool = False
flattened_fields: Dict[str, Any] = dataclasses.field(default_factory=dict)
client_output: bool = False
client_output_async: bool = False
DummyIdent = namedtuple("DummyIdent", ["name", "sphinx"])
DummyIdent.__new__.__defaults__ = (False,) * len(DummyIdent._fields)
DummyMessageTypePB = namedtuple("DummyMessageTypePB", ["name"])
# DummyMessageBase = namedtuple(
# "DummyMessage", ["fields", "type", "options", "ident",])
# DummyMessageBase.__new__.__defaults__ = (False,) * len(DummyMessageBase._fields)
DummyFieldBase = namedtuple("DummyField",
["message",
"enum",
"name",
"repeated",
"required",
"resource_reference",
"oneof",
"field_pb",
"meta",
"is_primitive",
"ident",
"type"])
DummyFieldBase.__new__.__defaults__ = (False,) * len(DummyFieldBase._fields)
class DummyField(DummyFieldBase):
@property
def mock_value_original_type(self):
return "mock_value"
class DummyMessage:
def __init__(self, *, fields={}, type="", options=False, ident=False, resource_path=False, meta=None):
self.fields = fields
self.type = type
self.options = options
self.ident = ident
self.resource_path = resource_path
self.meta = meta or metadata.Metadata()
def get_field(self, field_name: str):
return self.fields[field_name]
def oneof_fields(self):
return dict((field.oneof, field) for field in self.fields.values() if field.oneof)
@property
def required_fields(self):
return [field for field in self.fields.values() if field.required]
@property
def resource_path_args(self):
return wrappers.MessageType.PATH_ARG_RE.findall(self.resource_path or '')
DummyService = namedtuple("DummyService", [
"name", "methods", "client_name", "async_client_name", "resource_messages_dict"])
DummyService.__new__.__defaults__ = (False,) * len(DummyService._fields)
DummyApiSchema = namedtuple("DummyApiSchema",
["services", "naming", "messages"])
DummyApiSchema.__new__.__defaults__ = (False,) * len(DummyApiSchema._fields)
DummyNaming = namedtuple(
"DummyNaming", ["warehouse_package_name", "name", "version", "versioned_module_name", "module_namespace", "proto_package"])
DummyNaming.__new__.__defaults__ = (False,) * len(DummyNaming._fields)
def message_factory(exp: str,
repeated_iter=itertools.repeat(False),
enum: Optional[wrappers.EnumType] = None,
) -> DummyMessage:
# This mimics the structure of MessageType in the wrappers module:
# A MessageType has a map from field names to Fields,
# and a Field has an (optional) MessageType.
# The 'exp' parameter is a dotted attribute expression
# used to describe the field and type hierarchy,
# e.g. "mollusc.cephalopod.coleoid"
toks = exp.split(".")
messages = [DummyMessage(fields={}, type=tok.upper() + "_TYPE")
for tok in toks]
if enum:
messages[-1] = enum
for base, field, attr_name, repeated_field in zip(
messages, messages[1:], toks[1:], repeated_iter
):
base.fields[attr_name] = (DummyField(message=field, repeated=repeated_field)
if isinstance(field, DummyMessage)
else DummyField(enum=field))
return messages[0]
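# Illustrative usage sketch (not part of the original module): the dotted
# expression builds a chain of dummy messages, mirroring the comment above.
def _example_message_factory():
    mollusc = message_factory("mollusc.cephalopod.coleoid")
    assert mollusc.type == "MOLLUSC_TYPE"
    coleoid = mollusc.fields["cephalopod"].message.fields["coleoid"].message
    assert coleoid.type == "COLEOID_TYPE"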
def enum_factory(name: str, variants: Iterable[str]) -> wrappers.EnumType:
enum_pb = descriptor_pb2.EnumDescriptorProto(
name=name,
value=tuple(
descriptor_pb2.EnumValueDescriptorProto(name=v, number=i)
for i, v in enumerate(variants)
)
)
enum = wrappers.EnumType(
enum_pb=enum_pb,
values=[wrappers.EnumValueType(enum_value_pb=v) for v in enum_pb.value]
)
return enum
|
275e9bcd282b12d2918dcce334af42ef6fac2358
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/core/skysub.py
|
565091517024b2b91b422549ffbed36e10d91a58
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 77,806
|
py
|
skysub.py
|
""" Module for sky subtraction
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import numpy as np
import scipy.ndimage
import scipy.special
import matplotlib.pyplot as plt
from IPython import embed
from pypeit.images import imagebitmask
from pypeit.core import basis, pixels, extract
from pypeit.core import fitting
from pypeit.core import procimg
from pypeit import msgs, utils, bspline, slittrace
from pypeit.display import display
def skysub_npoly(thismask):
"""
Utility routine used by global_skysub and local_skysub_extract.
Determine the order for the spatial
polynomial for global sky subtraction and local sky subtraction.
Args:
thismask (`numpy.ndarray`_):
bool mask of shape (nspec, nspat) which
specifies pixels in the slit in question
Returns:
int: Order of polynomial
"""
slit_width = np.sum(thismask,axis=1)
med_slit_width = np.median(slit_width[slit_width > 0])
nspec_eff = np.sum(slit_width > 0.5*med_slit_width)
npercol = np.fmax(np.floor(np.sum(thismask)/nspec_eff), 1.0)
# Demand at least 10 pixels per row (on average) per degree of the polynomial
if npercol > 100:
npoly = 3
elif npercol > 40:
npoly = 2
else:
npoly = 1
return npoly
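# Illustrative usage sketch (not part of the original module): a rectangular
# 100x20 slit mask gives ~20 good pixels per spectral row, so the routine
# picks a first-order spatial polynomial.
def _example_skysub_npoly():
    thismask = np.ones((100, 20), dtype=bool)
    return skysub_npoly(thismask)  # -> 1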
def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=None, bsp=0.6,
sigrej=3.0, maxiter=35, trim_edg=(3,3), pos_mask=True, max_mask_frac=0.80,
show_fit=False, no_poly=False, npoly=None):
"""
Perform global sky subtraction on an input slit
THIS NEEDS MORE DESCRIPTION
Args:
image (`numpy.ndarray`_):
Frame to be sky subtracted.
float ndarray, shape (nspec, nspat)
ivar (`numpy.ndarray`_):
Inverse variance image.
float ndarray, shape (nspec, nspat)
tilts (`numpy.ndarray`_):
Tilts indicating how wavelengths move across the slit.
float ndarray, shape (nspec, nspat)
thismask (`numpy.ndarray`_):
Specifies pixels in the slit in question.
boolean array, shape (nspec, nspat)
slit_left (`numpy.ndarray`_):
Left slit boundary in floating point pixels.
shape (nspec, 1) or (nspec)
slit_righ (`numpy.ndarray`_):
Right slit boundary in floating point pixels.
shape (nspec, 1) or (nspec)
inmask (`numpy.ndarray`_):
boolean ndarray, shape (nspec, nspat)
Input mask for pixels not to be included in sky subtraction
fits. True = Good (not masked), False = Bad (masked)
bsp (float, optional):
break point spacing in pixel units
sigrej (float, optional):
sigma rejection threshold
trim_edg (tuple, optional):
floats (left_edge, right_edge) that
indicate how many pixels to trim from left and right slit
edges for creating the edgemask. These pixels are excluded
from sky subtraction fits.
pos_mask (bool, optional):
First do a preliminary fit to the log of the sky (i.e.
positive pixels only). Then use this fit to create an input
mask from the residuals lmask = (res < 5.0) & (res > -4.0)
for the full fit. NOTE: pos_mask should be False for
near-IR sky residual subtraction, since fitting the log(sky)
requires that the counts are positive which will not be the
case for, e.g., an A-B image. Thus the routine will fail if
pos_mask is not set to False.
max_mask_frac (float, optional):
Maximum fraction of total pixels that can be masked by the input masks. If more than this
threshold is masked the code will return zeros and throw a warning.
show_fit (bool, optional):
If true, plot a fit of the sky pixels and model fit to the screen.
This feature will block further execution until the screen
is closed.
no_poly (bool, optional):
If True, do not include a polynomial basis
npoly (int, optional):
Order of polynomial to use for the polynomial in the bspline
Only used if no_poly=False
Returns:
`numpy.ndarray`_ : The model sky background at the pixels where thismask is True::
>>> skyframe = np.zeros_like(image)
>>> thismask = slitpix == thisslit
>>> skyframe[thismask] = global_skysub(image,ivar, tilts, thismask, slit_left, slit_righ)
"""
# Synthesize ximg, and edgmask from slit boundaries. Doing this outside this
# routine would save time. But this is pretty fast, so we just do it here to make the interface simpler.
ximg, edgmask = pixels.ximg_and_edgemask(slit_left, slit_righ, thismask, trim_edg=trim_edg)
# TESTING!!!!
#no_poly=True
#show_fit=True
# Init
(nspec, nspat) = image.shape
piximg = tilts * (nspec-1)
if inmask is None:
inmask = (ivar > 0.0) & thismask & np.isfinite(image) & np.isfinite(ivar)
elif inmask.dtype != bool:
# Check that it's of type bool
msgs.error("Type of inmask should be bool and is of type: {:}".format(inmask.dtype))
# Sky pixels for fitting
gpm = thismask & (ivar > 0.0) & inmask & np.logical_not(edgmask)
bad_pixel_frac = np.sum(thismask & np.logical_not(gpm))/np.sum(thismask)
if bad_pixel_frac > max_mask_frac:
msgs.warn('This slit/order has {:5.3f}% of the pixels masked, which exceeds the threshold of {:f}%. '.format(100.0*bad_pixel_frac, 100.0*max_mask_frac)
+ msgs.newline() + 'There is likely a problem with this slit. Giving up on global sky-subtraction.')
return np.zeros(np.sum(thismask))
# Sub arrays
isrt = np.argsort(piximg[thismask])
pix = piximg[thismask][isrt]
sky = image[thismask][isrt]
sky_ivar = ivar[thismask][isrt]
ximg_fit = ximg[thismask][isrt]
inmask_fit = gpm[thismask][isrt]
inmask_prop = inmask_fit.copy()
#spatial = spatial_img[fit_sky][isrt]
# Restrict fit to positive pixels only and mask out large outliers via a pre-fit to the log.
if pos_mask:
pos_sky = (sky > 1.0) & (sky_ivar > 0.0)
if np.sum(pos_sky) > nspec:
lsky = np.log(sky[pos_sky])
lsky_ivar = inmask_fit[pos_sky].astype(float)/3.0**2 # set errors to just be 3.0 in the log
#lsky_ivar = np.full(lsky.shape, 0.1)
# Init bspline to get the sky breakpoints (kludgy)
lskyset, outmask, lsky_fit, red_chi, exit_status \
= fitting.bspline_profile(pix[pos_sky], lsky, lsky_ivar, np.ones_like(lsky),
ingpm=inmask_fit[pos_sky], upper=sigrej, lower=sigrej,
kwargs_bspline={'bkspace':bsp},
kwargs_reject={'groupbadpix': True, 'maxrej': 10})
if exit_status != 0:
msgs.warn('Global sky-subtraction did not exit cleanly for initial positive sky fit.'
+ msgs.newline() + 'Initial masking based on positive sky fit will be skipped')
else:
res = (sky[pos_sky] - np.exp(lsky_fit)) * np.sqrt(sky_ivar[pos_sky])
lmask = (res < 5.0) & (res > -4.0)
sky_ivar[pos_sky] = sky_ivar[pos_sky] * lmask
inmask_fit[pos_sky] = (sky_ivar[pos_sky] > 0.0) & lmask & inmask_prop[pos_sky]
# Include a polynomial basis?
if no_poly:
poly_basis = np.ones_like(sky)
npoly_fit = 1
else:
npoly_fit = skysub_npoly(thismask) if npoly is None else npoly
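# ximg runs from 0 to 1 across the slit, so 2*ximg - 1 maps it onto [-1, 1], the natural domain of the Legendre basis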
poly_basis = basis.flegendre(2.0*ximg_fit - 1.0, npoly_fit)
# Perform the full fit now
msgs.info("Full fit in global sky sub.")
skyset, outmask, yfit, _, exit_status = fitting.bspline_profile(pix, sky, sky_ivar, poly_basis, ingpm=inmask_fit, nord=4,
upper=sigrej, lower=sigrej, maxiter=maxiter,
kwargs_bspline={'bkspace':bsp},
kwargs_reject={'groupbadpix':True, 'maxrej': 10})
# TODO JFH This is a hack for now to deal with bad fits for which iterations do not converge. This is related
# to the groupbadpix behavior requested for the djs_reject rejection. It would be good to
# better understand what this functionality is doing, but it makes the rejection approach a small chi^2
# much more quickly
if exit_status == 1:
msgs.warn('Maximum iterations reached in bspline_profile global sky-subtraction for npoly={:d}.'.format(npoly_fit) +
msgs.newline() +
'Redoing sky-subtraction without polynomial degrees of freedom')
poly_basis = np.ones_like(sky)
# Perform the full fit now
skyset, outmask, yfit, _, exit_status \
= fitting.bspline_profile(pix, sky, sky_ivar, poly_basis, ingpm=inmask_fit, nord=4,
upper=sigrej, lower=sigrej, maxiter=maxiter,
kwargs_bspline={'bkspace': bsp},
kwargs_reject={'groupbadpix': False, 'maxrej': 10})
sky_frame = np.zeros_like(image)
ythis = np.zeros_like(yfit)
ythis[isrt] = yfit
sky_frame[thismask] = ythis
#skyset.funcname ='legendre'
#skyset.xmin = spat_min
#skyset.xmax = spat_max
# Evaluate and save
#bgframe, _ = skyset.value(piximg[thismask],x2=spatial_img[thismask])
# Debugging/checking
# ToDo This QA ceases to make sense I think for 2-d fits. I need to think about what the best QA would be here, but I think
# probably looking at residuals as a function of spectral and spatial position like in the flat fielding code.
if show_fit:
goodbk = skyset.mask
# This is approximate
yfit_bkpt = np.interp(skyset.breakpoints[goodbk], pix,yfit)
plt.clf()
ax = plt.gca()
was_fit_and_masked = inmask_fit & np.logical_not(outmask)
ax.plot(pix[inmask_fit], sky[inmask_fit], color='k', marker='o', markersize=0.4, mfc='k', fillstyle='full', linestyle='None', label='Pixels that were fit')
ax.plot(pix[was_fit_and_masked], sky[was_fit_and_masked], color='red', marker='+', markersize=1.5, mfc='red', fillstyle='full', linestyle='None', label='Pixels masked by fit')
ax.plot(pix, yfit, color='cornflowerblue', label='B-spline fit')
ax.plot(skyset.breakpoints[goodbk], yfit_bkpt, color='lawngreen', marker='o', markersize=4.0, mfc='lawngreen', fillstyle='full', linestyle='None', label='Good B-spline breakpoints')
ax.set_ylim((0.99*yfit.min(),1.01*yfit.max()))
plt.legend()
plt.show()
# Return
# TODO worth thinking about whether we want to return a mask here. It makes no sense to return outmask
# in its present form though since that does not refer to the whole image.
# return bgframe, outmask
return ythis
def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, fullbkpt=None):
"""
Utility routine used by local_skysub_extract that performs the joint b-spline fit for sky-background
and object flux.
Parameters
----------
piximg : `numpy.ndarray`_
piximg is tilts*(nspec-1) where nspec is the number of pixels in the
spectral direction of the raw image. This is a wavelength in image
coordinates which acts as the independent variable for sky and
object model fits. This is a 1d array (flattened in the calling
routine) with shape = (nflat,).
data : `numpy.ndarray`_
science data that is being fit. Same shape as piximg.
ivar : `numpy.ndarray`_
inverse variance of science data that is being fit. Same shape as piximg.
oprof : `numpy.ndarray`_
Flattened object profiles for the data that is being fit. Shape =
(nflat, nobj) where nobj is the number of objects being
simultaneously fit. In other words, there are nobj object profiles.
sigrej : :obj:`float`, optional
Sigma threshold for outlier rejection.
npoly : :obj:`int`, optional
Order of polynomial for the sky-background basis function. If
spatial_img is passed in, a fit with two independent variables will
be performed (spectral described by piximg, and spatial direction
described by spatial_img) and a Legendre polynomial basis of order
npoly will be used for the spatial direction. If npoly=1 or if
spatial_img is not passed, a flat spatial profile basis function
will instead be used.
spatial_img : `numpy.ndarray`_, optional
Image of the spatial coordinates of each pixel in the image used for
2d fitting. Same shape as piximg.
fullbkpt : `numpy.ndarray`_, optional
A 1d float array containing the breakpoints to be used for the
B-spline fit. The breakpoints are arranged in the spectral
direction, i.e. along the direction of the piximg independent
variable.
Returns
-------
sky_bmodel : `numpy.ndarray`_
Array with same shape as piximg containing the B-spline model of the
sky.
obj_bmodel : `numpy.ndarray`_
Array with same shape as piximg containing the B-spline model of the
object flux.
gpm : `numpy.ndarray`_
Boolean good pixel mask array with the same shape as piximg indicating
whether a pixel is good (True) or was masked (False).
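Examples
--------
Purely illustrative sketch of how the fitted basis is assembled (synthetic shapes; ``nx`` and ``nobj`` below are made up): with ``npoly = 1`` the sky is modeled by a single flat column appended to the object profiles::
>>> import numpy as np
>>> nx, nobj = 6, 2
>>> oprof = np.ones((nx, nobj))
>>> np.column_stack((oprof, np.ones(nx))).shape
(6, 3)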
"""
sortpix = piximg.argsort()
nx = data.size
nc = oprof.shape[0]
nobj = int(oprof.size / nc)
if nc != nx:
raise ValueError('Object profile should have oprof.shape[0] equal to nx')
msgs.info('Iter Chi^2 Rejected Pts')
xmin = 0.0
xmax = 1.0
if ((npoly == 1) | (spatial_img is None)):
profile_basis = np.column_stack((oprof, np.ones(nx)))
else:
xmin = spatial_img.min()
xmax = spatial_img.max()
x2 = 2.0 * (spatial_img - xmin) / (xmax - xmin) - 1
poly_basis = basis.flegendre(x2, npoly)
profile_basis = np.column_stack((oprof, poly_basis))
relative_mask = (np.sum(oprof, axis=1) > 1e-10)
indx, = np.where(ivar[sortpix] > 0.0)
ngood = indx.size
good = sortpix[indx]
good = good[piximg[good].argsort()]
relative, = np.where(relative_mask[good])
gpm = np.zeros(piximg.shape, dtype=bool)
if ngood > 0:
sset1, gpm_good1, yfit1, red_chi1, exit_status \
= fitting.bspline_profile(piximg[good], data[good], ivar[good], profile_basis[good, :],
fullbkpt=fullbkpt, upper=sigrej, lower=sigrej,
relative=relative,
kwargs_reject={'groupbadpix': True, 'maxrej': 5})
else:
msgs.warn('All pixels are masked in skyoptimal. Not performing local sky subtraction.')
return np.zeros_like(piximg), np.zeros_like(piximg), gpm
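# Use the empirical chi^2 distribution of this first fit to set the rejection threshold for the second, cleaner fit below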
chi2 = (data[good] - yfit1) ** 2 * ivar[good]
chi2_srt = np.sort(chi2)
gauss_prob = 1.0 - 2.0 * scipy.special.ndtr(-1.2 * sigrej)
sigind = int(np.fmin(np.rint(gauss_prob * float(ngood)), ngood - 1))
chi2_sigrej = chi2_srt[sigind]
mask1 = (chi2 < chi2_sigrej)
msgs.info('2nd round....')
msgs.info('Iter Chi^2 Rejected Pts')
if np.any(mask1):
sset, gpm_good, yfit, red_chi, exit_status \
= fitting.bspline_profile(piximg[good], data[good], ivar[good], profile_basis[good,:],
ingpm=mask1, fullbkpt=fullbkpt, upper=sigrej, lower=sigrej,
relative=relative,
kwargs_reject={'groupbadpix': True, 'maxrej': 1})
else:
msgs.warn('All pixels are masked in skyoptimal after first round of rejection. Not performing local sky subtraction.')
return np.zeros_like(piximg), np.zeros_like(piximg), gpm
ncoeff = npoly + nobj
skyset = bspline.bspline(None, fullbkpt=sset.breakpoints, nord=sset.nord, npoly=npoly)
# Set coefficients for the sky.
# The reshape below deals with the different sizes of the coeff for npoly = 1 vs npoly > 1
# and mirrors similar logic in bspline.py
skyset.coeff = sset.coeff[nobj:, :].reshape(skyset.coeff.shape)
skyset.mask = sset.mask
skyset.xmin = xmin
skyset.xmax = xmax
# JFH TODO Seems odd that spatial_img is not centered in the same way as x2 above. The value code recenters
# the x2 input about skyset.xmin and skyset.xmax but I admit I don't completely follow
sky_bmodel, _ = skyset.value(piximg, x2=spatial_img)
obj_bmodel = np.zeros(sky_bmodel.shape)
objset = bspline.bspline(None, fullbkpt=sset.breakpoints, nord=sset.nord)
objset.mask = sset.mask
for i in range(nobj):
objset.coeff = sset.coeff[i, :]
obj_bmodel1, _ = objset.value(piximg)
obj_bmodel = obj_bmodel + obj_bmodel1 * profile_basis[:, i]
gpm[good] = gpm_good
return sky_bmodel, obj_bmodel, gpm
def optimal_bkpts(bkpts_optimal, bsp_min, piximg, sampmask, samp_frac=0.80,
skyimage = None, min_spat=None, max_spat=None, debug=False):
"""
Generate an array of optimally spaced breakpoints for the
b-spline sky-subtraction fits.
Parameters
----------
bkpts_optimal: bool
If True, determine optimally spaced breakpoints from the sampling of
piximg as described below; if False, simply return the uniformly
spaced grid with spacing bsp_min.
bsp_min: float
Desired B-spline breakpoint spacing in pixels
piximg: `numpy.ndarray`_
Image containing the pixel sampling, i.e. (nspec-1)*tilts.
shape = (nspec, nspat)
sampmask: `numpy.ndarray`_
Boolean array indicating the pixels for which the B-spline fit will actually be evaluated. True = Good, False=Bad
samp_frac: float
The fraction of spectral direction pixels required to have a sampling difference < bsp_min in order to instead
adopt a uniform break point spacing, rather than adopting the optimally spaced breakpoints.
skyimage: `numpy.ndarray`_
Sky model image used only for QA.
shape = (nspec, nspat)
min_spat: float, optional
Minimum spatial pixel used for local sky subtraction fitting. Only used for title of QA plot.
max_spat: float, optional
Maximum spatial pixel used for local sky subtraction fitting. Only used for title of QA plot.
debug: bool, optional
Show QA plot to debug breakpoint placing.
Returns
-------
fullbkpt: `numpy.ndarray`_
Locations of the optimally sampled breakpoints
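Examples
--------
Illustrative sketch (synthetic numbers, editor's addition) of the per-row sampling gap that drives the breakpoint placement; ``dsamp`` is the gap between the wavelength coverage of successive spectral rows and is negative where rows overlap::
>>> import numpy as np
>>> samplmin = np.array([0.0, 1.2, 2.4])
>>> samplmax = np.array([1.0, 2.2, 3.4])
>>> dsamp = np.roll(samplmin, -1) - samplmax
>>> bool(np.all(dsamp[:-1] > 0))
True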
"""
pix = piximg[sampmask]
isrt = pix.argsort()
pix = pix[isrt]
piximg_min = pix.min()
piximg_max = pix.max()
bset0 = bspline.bspline(pix, nord=4, bkspace=bsp_min)
fullbkpt_grid = bset0.breakpoints
keep = (fullbkpt_grid >= piximg_min) & (fullbkpt_grid <= piximg_max)
fullbkpt_grid = fullbkpt_grid[keep]
used_grid = False
if not bkpts_optimal:
msgs.info('bkpts_optimal = False --> using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min))
fullbkpt = fullbkpt_grid
used_grid = True
else:
piximg_temp = np.ma.array(np.copy(piximg))
piximg_temp.mask = np.invert(sampmask)
samplmin = np.ma.min(piximg_temp,fill_value=np.inf,axis=1)
samplmin = samplmin[np.invert(samplmin.mask)].data
samplmax = np.ma.max(piximg_temp,fill_value=-np.inf,axis=1)
samplmax = samplmax[np.invert(samplmax.mask)].data
if samplmax.size != samplmin.size:
msgs.error('This should not happen')
nbkpt = samplmax.size
# Determine the sampling. dsamp represents the gap in spectral pixel (wavelength) coverage between
# subsequent spectral direction pixels in the piximg, i.e. it is the difference between the minimum
# value of the piximg at spectral direction pixel i+1, and the maximum value of the piximg at spectral
# direction pixel i. A negative value dsamp < 0 implies continuous sampling with no gaps, i.e. the
# arc lines are sufficiently tilted that there is no sampling gap.
dsamp_init = np.roll(samplmin, -1) - samplmax
dsamp_init[nbkpt - 1] = dsamp_init[nbkpt - 2]
kernel_size = int(np.fmax(np.ceil(dsamp_init.size*0.01)//2*2 + 1,15)) # This ensures kernel_size is odd
dsamp_med = scipy.ndimage.median_filter(dsamp_init, size=kernel_size, mode='reflect')
boxcar_size = int(np.fmax(np.ceil(dsamp_med.size*0.005)//2*2 + 1,5))
# Boxcar smooth median dsamp
kernel = np.ones(boxcar_size)/ float(boxcar_size)
dsamp = scipy.ndimage.convolve(dsamp_med, kernel, mode='reflect')
# If more than samp_frac of the pixels have dsamp < bsp_min, then just use a uniform breakpoint spacing
if np.sum(dsamp <= bsp_min) > samp_frac*nbkpt:
msgs.info('Sampling of wavelengths is nearly continuous.')
msgs.info('Using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min))
fullbkpt = fullbkpt_grid
used_grid = True
else:
fullbkpt_orig = samplmax + dsamp/2.0
fullbkpt_orig.sort()
# Compute the distance between breakpoints
dsamp_bkpt = fullbkpt_orig-np.roll(fullbkpt_orig, 1)
dsamp_bkpt[0] = dsamp_bkpt[1]
# Good breakpoints are those that are at least separated by our original desired bkpt spacing
igood = dsamp_bkpt >= bsp_min
if np.any(igood):
fullbkpt_orig = fullbkpt_orig[igood]
fullbkpt = fullbkpt_orig.copy()
# Recompute the distance between breakpoints
dsamp_bkpt = fullbkpt_orig-np.roll(fullbkpt_orig, 1)
dsamp_bkpt[0] = dsamp_bkpt[1]
nbkpt = fullbkpt_orig.size
for ibkpt in range(nbkpt):
dsamp_eff = np.fmax(dsamp_bkpt[ibkpt], bsp_min)
# can we fit in another bkpt?
if dsamp_bkpt[ibkpt] > 2*dsamp_eff:
nsmp = int(np.fmax(np.floor(dsamp_bkpt[ibkpt]/dsamp_eff),2))
bkpt_new = fullbkpt_orig[ibkpt - 1] + (np.arange(nsmp - 1) + 1)*dsamp_bkpt[ibkpt]/float(nsmp)
indx_arr = np.where(fullbkpt == fullbkpt_orig[ibkpt-1])[0]
if len(indx_arr) > 0:
indx_bkpt = indx_arr[0]
if indx_bkpt == 0:
fullbkpt = np.hstack((fullbkpt[0], bkpt_new, fullbkpt[indx_bkpt + 1:]))
elif indx_bkpt == (fullbkpt.size-2):
fullbkpt = np.hstack((fullbkpt[0:indx_bkpt], bkpt_new, fullbkpt[indx_bkpt + 1]))
else:
fullbkpt = np.hstack((fullbkpt[0:indx_bkpt], bkpt_new, fullbkpt[indx_bkpt + 1:]))
fullbkpt.sort()
keep = (fullbkpt >= piximg_min) & (fullbkpt <= piximg_max)
fullbkpt = fullbkpt[keep]
if debug:
plt.figure(figsize=(14, 6))
sky = skyimage[sampmask]
sky = sky[isrt]
# This is approximate and only for the sake of visualization:
spat_samp_vec = np.sum(sampmask, axis=1) # spatial sampling per spectral direction pixel
spat_samp_med = np.median(spat_samp_vec[spat_samp_vec > 0])
window_size = int(np.ceil(5 * spat_samp_med))
sky_med_filt = utils.fast_running_median(sky, window_size)
sky_bkpt_grid = np.interp(fullbkpt_grid, pix, sky_med_filt)
sky_bkpt = np.interp(fullbkpt, pix, sky_med_filt)
plt.clf()
ax = plt.gca()
ax.plot(pix, sky, color='k', marker='o', markersize=0.4, mfc='k', fillstyle='full', linestyle='None')
# ax.plot(pix, sky_med_filt, color='cornflowerblue', label='median sky', linewidth=1.2)
if not used_grid:
ax.plot(fullbkpt_grid, sky_bkpt_grid, color='lawngreen', marker='o', markersize=2.0, mfc='lawngreen',
fillstyle='full', linestyle='None', label='uniform bkpt grid')
color = 'red'
title_str = ''
else:
color = 'lawngreen'
title_str = 'Used Grid: '
ax.plot(fullbkpt, sky_bkpt, color=color, marker='o', markersize=4.0, mfc=color,
fillstyle='full', linestyle='None', label='optimal bkpts')
ax.set_ylim((0.99 * sky_med_filt.min(), 1.01 * sky_med_filt.max()))
if min_spat is not None:
plt.title(title_str + 'bkpt sampling spat pixels {:7.1f}-{:7.1f}'.format(min_spat, max_spat))
plt.legend()
plt.show()
return fullbkpt
def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, slit_left,
slit_righ, sobjs, ingpm=None, spat_pix=None, adderr=0.01, bsp=0.6,
trim_edg=(3,3), std=False, prof_nsigma=None, niter=4,
extract_good_frac=0.005, sigrej=3.5, bkpts_optimal=True,
debug_bkpts=False, force_gauss=False, sn_gauss=4.0, model_full_slit=False,
model_noise=True, show_profile=False, show_resids=False,
use_2dmodel_mask=True, no_local_sky=False, base_var=None,
count_scale=None):
r"""
Perform local sky subtraction and extraction
Iteratively fits the spatial profiles of the objects on the slit, fits a local b-spline sky model,
and performs both boxcar and optimal extraction.
Parameters
----------
sciimg : `numpy.ndarray`_
science image, usually with a global sky subtracted.
shape = (nspec, nspat)
sciivar : `numpy.ndarray`_
inverse variance of science image.
shape = (nspec, nspat)
tilts : `numpy.ndarray`_
spectral tilts.
shape=(nspec, nspat)
waveimg : `numpy.ndarray`_
2-d wavelength map
global_sky : `numpy.ndarray`_
Global sky model produced by global_skysub
thismask : `numpy.ndarray`_
Specifies pixels in the slit in question
slit_left : `numpy.ndarray`_
Left slit boundary in floating point pixels.
shape (nspec, 1) or (nspec)
slit_righ : `numpy.ndarray`_
Right slit boundary in floating point pixels.
shape (nspec, 1) or (nspec)
sobjs : :class:`~pypeit.specobjs.SpecObjs` object
Object containing the information about the objects found on the
slit/order from objfind or ech_objfind
ingpm : `numpy.ndarray`_, optional
Input mask with any non-zero item flagged as False using
:class:`pypeit.images.imagebitmask.ImageBitMask`
shape=(nspec, nspat)
spat_pix: `numpy.ndarray`_, optional
Image containing the spatial location of pixels. If not
input, it will be computed from ``spat_img =
np.outer(np.ones(nspec), np.arange(nspat))``. This option
should generally not be used unless one is extracting 2d
coadds for which a rectified image contains sub-pixel
spatial information.
shape (nspec, nspat)
adderr : float, default = 0.01
Error floor. The quantity adderr**2*sciframe**2 is added to the variance
to ensure that the S/N is never > 1/adderr, effectively setting a floor
on the noise or a ceiling on the S/N. This is one of the components
needed to construct the model variance (this is the same as
``noise_floor`` in :func:`~pypeit.core.procimg.variance_model`); see
``model_noise``.
bsp : float, default = 0.6
Break point spacing in pixels for the b-spline sky subtraction.
trim_edg : tuple of ints or floats, default = (3,3)
Number of pixels to be ignored on the (left,right) edges of
the slit in object/sky model fits.
std : bool, default = False
This should be set to True if the object being extracted is
a standard star so that the reduction parameters can be
adjusted accordingly.
prof_nsigma : int or float, default = None
Number of sigmas that the object profile will be fit, i.e.
the region extending from -prof_nsigma to +prof_nsigma will
be fit where sigma = FWHM/2.35. This option should only be
used for bright, large, extended sources with tails in their
light profile like elliptical galaxies. If prof_nsigma is
set then the profiles will no longer be apodized by an
exponential at large distances from the trace.
niter : int, default = 4
Number of iterations for successive profile fitting and local sky-subtraction
extract_good_frac: float, default = 0.005
Minimum fraction of pixels along the spectral direction with good
optimal extraction
sigrej : :obj:`float`, optional
Outlier rejection threshold for sky and object fitting
Set by par['scienceimage']['skysub']['sky_sigrej']
bkpts_optimal : bool, optional
Parameter governing whether spectral direction breakpoints
for b-spline sky/object modeling are determined optimally.
If ``bkpts_optimal=True``, the optimal break-point spacing
will be determined directly using the optimal_bkpts function
by measuring how well we are sampling the sky using ``piximg
= (nspec-1)*tilts``. The bsp parameter in this case
corresponds to the minimum distance between breakpoints
which we allow. If ``bkpts_optimal = False``, the
break-points will be chosen to have a uniform spacing in
pixel units set by the bsp parameter, i.e. using the
bkspace functionality of the bspline class::
bset = bspline.bspline(piximg_values, nord=4, bkspace=bsp)
fullbkpt = bset.breakpoints
debug_bkpts : bool, default=False
Make an interactive plot to the screen to indicate how the
breakpoints are being chosen.
force_gauss : bool, default = False
If True, a Gaussian profile will always be assumed for the optimal
extraction using the FWHM determined from object finding (or provided by
the user) for the spatial profile.
sn_gauss : int or float, default = 4.0
The signal to noise threshold above which optimal extraction
with non-parametric b-spline fits to the objects spatial
profile will be performed. For objects with median S/N <
sn_gauss, a Gaussian profile will simply be assumed because
there is not enough S/N to justify performing a more
complicated fit.
model_full_slit : bool, default = False
Set the maskwidth of the objects to be equal to the slit
width/2 such that the entire slit will be modeled by the
local sky subtraction. This mode is recommended for echelle
spectra with reasonably narrow slits.
model_noise : bool, default = True
If True, construct and iteratively update a model inverse variance image
using :func:`~pypeit.core.procimg.variance_model`. Construction of the
model variance *requires* ``base_var``, and will use the provided values
or defaults for the remaining
:func:`~pypeit.core.procimg.variance_model` parameters. If False, a
variance model will not be created and instead the input sciivar will
always be taken to be the inverse variance. Note that in order for the
noise model to make any sense one needs to be subtracting the sky and
*not* the sky residuals. In other words, for near-IR reductions where
difference imaging has been performed and this algorithm is used to fit
out the sky residuals (but not the sky itself) one should definitely set
model_noise=False since otherwise the code will attempt to create a
noise model using sky residuals instead of the sky, which is incorrect
(does not have the right count levels). In principle this could be
improved if the user could pass in a model of what the sky is for
near-IR difference imaging + residual subtraction
show_profile : bool, default=False
Show QA for the object profile fitting to the screen. Note
that this will show interactive matplotlib plots which will
block the execution of the code until the window is closed.
show_resids : bool, optional
Show the model fits and residuals.
use_2dmodel_mask : bool, optional
Use the mask made from profile fitting when extracting?
no_local_sky : bool, optional
If True, do not fit local sky model, only object profile and extract optimally
The objimage will be all zeros.
base_var : `numpy.ndarray`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the image processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `numpy.ndarray`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science image. It accounts for the number of frames contributing to
the provided counts, and the relative throughput factors that can be measured
from flat-field frames. For example, if the image has been flat-field
corrected, this is the inverse of the flat-field counts. If None, set
to 1. If a single float, assumed to be constant across the full image.
If an array, the shape must match ``base_var``. The variance will be 0
wherever :math:`s \leq 0`, modulo the provided ``adderr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
Returns
-------
skyimage : `numpy.ndarray`_
Model sky flux where ``thismask`` is true.
objimage : `numpy.ndarray`_
Model object flux where ``thismask`` is true.
modelivar : `numpy.ndarray`_
Model inverse variance where ``thismask`` is true.
outmask : `numpy.ndarray`_
Boolean mask (True = good) for the pixels where ``thismask`` is true,
updated with the pixels rejected during the sky/profile fitting and extraction.
"""
# Check input
if model_noise and base_var is None:
msgs.error('Must provide base_var to iteratively update and improve the noise model.')
if base_var is not None and base_var.shape != sciimg.shape:
msgs.error('Base variance array does not match science image array shape.')
# TODO Force traces near edges to always be extracted with a Gaussian profile.
# TODO -- This should be using the SlitTraceSet method
ximg, edgmask = pixels.ximg_and_edgemask(slit_left, slit_righ, thismask, trim_edg=trim_edg)
nspat = sciimg.shape[1]
nspec = sciimg.shape[0]
piximg = tilts * (nspec-1)
# Copy the specobjs that will be the output
nobj = len(sobjs)
# Set up the prof_nsigma
if (prof_nsigma is None):
prof_nsigma1 = np.full(len(sobjs), None)
elif np.size(prof_nsigma) == 1:
prof_nsigma1 = np.full(nobj, prof_nsigma)
elif np.size(prof_nsigma) == nobj:
prof_nsigma1 = prof_nsigma
else:
raise ValueError('Invalid size for prof_nsigma.')
for iobj in range(nobj):
sobjs[iobj].prof_nsigma = prof_nsigma1[iobj]
# Set some rejection parameters based on whether this is a standard or not. Only reject extreme outliers for standards
# since super high S/N and low order profile models imply we will always have large outliers
if std is True:
chi2_sigrej = 100.0
#sigrej_ceil = 1e10
sigrej = 50.0 # 25 wasn't enough for MagE 2x2 binning (probably undersampled)
else:
# TODO Why is this not an input parameter
chi2_sigrej = 6.0
#sigrej_ceil = 10.0
# Gaussian probability enclosed within +/- sigrej; used later to set the empirical chi^2 rejection threshold
gauss_prob = 1.0 - 2.0 * scipy.special.ndtr(-sigrej)
# Create the images that will be returned
modelivar = np.copy(sciivar)
objimage = np.zeros_like(sciimg)
skyimage = np.copy(global_sky)
# Masks
if ingpm is None:
ingpm = (sciivar > 0.0) & thismask & np.isfinite(sciimg) & np.isfinite(sciivar)
inmask = ingpm & thismask
outmask = np.copy(inmask) # True is good
# TODO Add a line of code here that updates the modelivar using the global sky if nobj = 0 and simply returns
spat_img = np.outer(np.ones(nspec), np.arange(nspat))
if spat_pix is None:
spat_pix = spat_img
xsize = slit_righ - slit_left
# TODO Can this be simply replaced with spat_img above (but not spat_pix since that could have holes)
spatial_img = thismask * ximg * (np.outer(xsize, np.ones(nspat)))
# First, we find all groups of objects to local skysubtract together
groups = sobjs.get_extraction_groups(model_full_slit=model_full_slit)
for group in groups:
if model_full_slit:
# If we're modelling the full slit, update the entire slit.
min_spat1 = slit_left
max_spat1 = slit_righ
else:
# The default value of maskwidth = 4.0 * FWHM = 9.4 * sigma in objfind with a log(S/N) correction for bright objects
# But the width can be adjusted with `par['reduce']['skysub']['local_maskwidth']`
left_edges = np.array([sobjs[i].TRACE_SPAT - sobjs[i].maskwidth - 1 for i in group])
righ_edges = np.array([sobjs[i].TRACE_SPAT + sobjs[i].maskwidth + 1 for i in group])
min_spat1 = np.maximum(np.amin(left_edges, axis=0), slit_left)
max_spat1 = np.minimum(np.amax(righ_edges, axis=0), slit_righ)
# Create the local mask which defines the pixels that will be updated by local sky subtraction
min_spat_img = min_spat1[:, None]
max_spat_img = max_spat1[:, None]
localmask = (spat_img > min_spat_img) & (spat_img < max_spat_img) & thismask
npoly = skysub_npoly(localmask)
# Some bookkeeping to define the sub-image and make sure it does not land off the mask
objwork = len(group)
scope = np.sum(thismask, axis=0)
iscp, = np.where(scope)
imin = iscp.min()
imax = iscp.max()
min_spat = np.fmax(np.floor(min(min_spat1)), imin)
max_spat = np.fmin(np.ceil(max(max_spat1)), imax)
nc = int(max_spat - min_spat + 1)
spec_vec = np.arange(nspec, dtype=int) #np.intp)
spat_vec = np.arange(min_spat, min_spat + nc, dtype=int) #np.intp)
ipix = np.ix_(spec_vec, spat_vec)
obj_profiles = np.zeros((nspec, nspat, objwork), dtype=float)
sigrej_eff = sigrej
for iiter in range(1, niter + 1):
msgs.info('--------------------------REDUCING: Iteration # ' + '{:2d}'.format(iiter) + ' of ' +
'{:2d}'.format(niter) + '---------------------------------------------------')
img_minsky = sciimg - skyimage
for ii in range(objwork):
iobj = group[ii]
if iiter == 1:
# If this is the first iteration, print status message. Initiate profile fitting with a simple
# boxcar extraction.
msgs.info("----------------------------------- PROFILE FITTING --------------------------------------------------------")
msgs.info("Fitting profile for obj # " + "{:}".format(sobjs[iobj].OBJID) + " of {:}".format(nobj))
msgs.info("At x = {:5.2f}".format(sobjs[iobj].SPAT_PIXPOS) + " on slit # {:}".format(sobjs[iobj].slit_order))
msgs.info("------------------------------------------------------------------------------------------------------------")
# TODO -- Use extract_specobj_boxcar to avoid code duplication
extract.extract_boxcar(sciimg, modelivar, outmask, waveimg, skyimage,
sobjs[iobj], base_var=base_var, count_scale=count_scale,
noise_floor=adderr)
flux = sobjs[iobj].BOX_COUNTS
fluxivar = sobjs[iobj].BOX_COUNTS_IVAR * sobjs[iobj].BOX_MASK
wave = sobjs[iobj].BOX_WAVE
else:
# For later iterations, profile fitting is based on an optimal extraction
last_profile = obj_profiles[:, :, ii]
trace = sobjs[iobj].TRACE_SPAT[:, None]
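# Only pixels within 2 boxcar radii of the object trace are used for this object's extraction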
objmask = ((spat_img >= (trace - 2.0 * sobjs[iobj].BOX_RADIUS)) & (spat_img <= (trace + 2.0 * sobjs[iobj].BOX_RADIUS)))
# Boxcar
extract.extract_boxcar(sciimg, modelivar, (outmask & objmask), waveimg,
skyimage, sobjs[iobj], base_var=base_var,
count_scale=count_scale, noise_floor=adderr)
# Optimal
extract.extract_optimal(sciimg, modelivar, (outmask & objmask), waveimg,
skyimage, thismask, last_profile, sobjs[iobj],
base_var=base_var, count_scale=count_scale,
noise_floor=adderr)
# If the extraction is bad do not update
if sobjs[iobj].OPT_MASK is not None:
# if there is only one good pixel `extract.fit_profile` fails
if np.sum(sobjs[iobj].OPT_MASK) > extract_good_frac * nspec:
flux = sobjs[iobj].OPT_COUNTS
fluxivar = sobjs[iobj].OPT_COUNTS_IVAR*sobjs[iobj].OPT_MASK
wave = sobjs[iobj].OPT_WAVE
obj_string = 'obj # {:}'.format(sobjs[iobj].OBJID) + ' on slit # {:}'.format(sobjs[iobj].slit_order) + ', iter # {:}'.format(iiter) + ':'
if wave.any():
sign = sobjs[iobj].sign
# TODO This is "sticky" masking. Do we want it to be?
profile_model, trace_new, fwhmfit, med_sn2 = extract.fit_profile(
sign*img_minsky[ipix], (modelivar * outmask)[ipix],waveimg[ipix], thismask[ipix], spat_pix[ipix], sobjs[iobj].TRACE_SPAT,
wave, sign*flux, fluxivar, inmask = outmask[ipix],
thisfwhm=sobjs[iobj].FWHM, prof_nsigma=sobjs[iobj].prof_nsigma, sn_gauss=sn_gauss, gauss=force_gauss, obj_string=obj_string,
show_profile=show_profile)
# Update the object profile and the fwhm and mask parameters
obj_profiles[ipix[0], ipix[1], ii] = profile_model
sobjs[iobj].TRACE_SPAT = trace_new
sobjs[iobj].FWHMFIT = fwhmfit
sobjs[iobj].FWHM = np.median(fwhmfit)
# TODO JFH In the xidl code the maskwidth was being updated which impacted the sub-image used for the
# fit_profile profile fitting. This is no longer the case in the python version. However, I'm leaving
# these lines here in case we decide to implement
# something like that.
#mask_fact = 1.0 + 0.5 * np.log10(np.fmax(np.sqrt(np.fmax(med_sn2, 0.0)), 1.0))
#maskwidth = extract_maskwidth*np.median(fwhmfit) * mask_fact
#sobjs[iobj].maskwidth = maskwidth if sobjs[iobj].prof_nsigma is None else \
# sobjs[iobj].prof_nsigma * (sobjs[iobj].FWHM / 2.3548)
else:
msgs.warn("Bad extracted wavelengths in local_skysub_extract")
msgs.warn("Skipping this profile fit and continuing.....")
# Fit the local sky
sky_bmodel = np.array(0.0)
iterbsp = 0
while (not sky_bmodel.any()) & (iterbsp <= 4) & (not no_local_sky):
bsp_now = (1.2 ** iterbsp) * bsp
fullbkpt = optimal_bkpts(bkpts_optimal, bsp_now, piximg, localmask, debug=(debug_bkpts & (iiter == niter)),
skyimage=skyimage, min_spat=min_spat, max_spat=max_spat)
# check to see if only a subset of the image is used.
# if so truncate input pixels since this can result in singular matrices
isub, = np.where(localmask.flatten())
#sortpix = (piximg.flat[isub]).argsort()
obj_profiles_flat = obj_profiles.reshape(nspec * nspat, objwork)
skymask = outmask & np.invert(edgmask)
sky_bmodel, obj_bmodel, outmask_opt = skyoptimal(
piximg.flat[isub], sciimg.flat[isub], (modelivar * skymask).flat[isub],
obj_profiles_flat[isub, :], spatial_img=spatial_img.flat[isub],
fullbkpt=fullbkpt, sigrej=sigrej_eff, npoly=npoly)
iterbsp = iterbsp + 1
if (not sky_bmodel.any()) & (iterbsp <= 3):
msgs.warn('***************************************')
msgs.warn('WARNING: bspline sky-subtraction failed')
msgs.warn('Increasing bkpt spacing by 20%. Retry')
msgs.warn(
'Old bsp = {:5.2f}'.format(bsp_now) + '; New bsp = {:5.2f}'.format(1.2 ** (iterbsp) * bsp))
msgs.warn('***************************************')
if sky_bmodel.any():
skyimage.flat[isub] = sky_bmodel
objimage.flat[isub] = obj_bmodel
img_minsky.flat[isub] = sciimg.flat[isub] - sky_bmodel
igood1 = skymask.flat[isub]
# update the outmask for only those pixels that were fit. This prevents masking of slit edges in outmask
outmask.flat[isub[igood1]] = outmask_opt[igood1]
# For weighted co-adds, the variance of the image is no longer equal to the image, and so the modelivar
# eqn. below is not valid. However, co-adds already have the model noise propagated correctly in sciivar,
# so no need to re-model the variance.
if model_noise:
_base_var = None if base_var is None else base_var.flat[isub]
_count_scale = None if count_scale is None else count_scale.flat[isub]
# NOTE: darkcurr must be a float for the call below to work.
var = procimg.variance_model(_base_var, counts=sky_bmodel+obj_bmodel,
count_scale=_count_scale, noise_floor=adderr)
modelivar.flat[isub] = utils.inverse(var)
# Now do some masking based on this round of model fits
chi2 = (img_minsky.flat[isub] - obj_bmodel) ** 2 * modelivar.flat[isub]
igood = (skymask.flat[isub]) & (chi2 <= chi2_sigrej ** 2)
ngd = np.sum(igood)
if ngd > 0:
chi2_good = chi2[igood]
chi2_srt = np.sort(chi2_good)
sigind = np.fmin(int(np.rint(gauss_prob * float(ngd))), ngd - 1)
chi2_sigrej = chi2_srt[sigind]
sigrej_eff = np.fmax(np.sqrt(chi2_sigrej), sigrej)
# Maximum sigrej is sigrej_ceil (unless this is a standard)
#sigrej_eff = np.fmin(sigrej_eff, sigrej_ceil)
msgs.info('Measured effective rejection from distribution of chi^2')
msgs.info('Instead of rejecting sigrej = {:5.2f}'.format(sigrej) +
', use threshold sigrej_eff = {:5.2f}'.format(sigrej_eff))
# Explicitly mask > sigrej outliers using the distribution of chi2 but only in the region that was actually fit.
# This prevents e.g. excessive masking of slit edges
outmask.flat[isub[igood1]] = outmask.flat[isub[igood1]] & (chi2[igood1] < chi2_sigrej) & (
sciivar.flat[isub[igood1]] > 0.0)
nrej = outmask.flat[isub[igood1]].sum()
msgs.info(
'Iteration = {:d}'.format(iiter) + ', rejected {:d}'.format(nrej) + ' of ' + '{:d}'.format(
igood1.sum()) + ' fit pixels')
elif no_local_sky:
pass
else:
msgs.warn('ERROR: Bspline sky subtraction failed after 4 iterations of bkpt spacing')
msgs.warn(' Moving on......')
obj_profiles = np.zeros_like(obj_profiles)
isub, = np.where(localmask.flatten())
# Just replace with the global sky
skyimage.flat[isub] = global_sky.flat[isub]
outmask_extract = outmask if use_2dmodel_mask else inmask
# Now that the iterations of profile fitting and sky subtraction are completed,
# loop over the objwork objects in this grouping and perform the final extractions.
for ii in range(objwork):
iobj = group[ii]
msgs.info('Extracting obj # {:d}'.format(iobj + 1) + ' of {:d}'.format(nobj) +
' with objid = {:d}'.format(sobjs[iobj].OBJID) + ' on slit # {:d}'.format(sobjs[iobj].slit_order) +
' at x = {:5.2f}'.format(sobjs[iobj].SPAT_PIXPOS))
this_profile = obj_profiles[:, :, ii]
trace = sobjs[iobj].TRACE_SPAT[:, None]
# Optimal
objmask = ((spat_img >= (trace - 2.0 * sobjs[iobj].BOX_RADIUS)) & (spat_img <= (trace + 2.0 * sobjs[iobj].BOX_RADIUS)))
extract.extract_optimal(sciimg, modelivar * thismask, (outmask_extract & objmask),
waveimg, skyimage, thismask, this_profile, sobjs[iobj],
base_var=base_var, count_scale=count_scale,
noise_floor=adderr)
# Boxcar
extract.extract_boxcar(sciimg, modelivar*thismask, (outmask_extract & objmask),
waveimg, skyimage, sobjs[iobj], base_var=base_var,
count_scale=count_scale, noise_floor=adderr)
sobjs[iobj].min_spat = min_spat
sobjs[iobj].max_spat = max_spat
# If requested display the model fits for this slit
if show_resids:
viewer, ch = display.show_image((sciimg - skyimage - objimage) * np.sqrt(modelivar) * thismask, chname='residuals')
# TODO add error checking here to see if ginga exists
canvas = viewer.canvas(ch._chname)
out1 = canvas.clear()
out2 = ch.cut_levels(-5.0, 5.0)
out3 = ch.set_color_algorithm('linear')
# Overplot the traces
for spec in sobjs:
if spec.hand_extract_flag is False:
color = 'magenta'
else:
color = 'orange'
display.show_trace(viewer, ch, spec.TRACE_SPAT, spec.NAME, color=color)
# These are the pixels that were masked by the extraction
spec_mask, spat_mask = np.where((outmask == False) & (inmask == True))
nmask = len(spec_mask)
# note: must cast numpy floats to regular python floats to pass the remote interface
points_mask = [dict(type='point', args=(float(spat_mask[i]), float(spec_mask[i]), 2),
kwargs=dict(style='plus', color='red')) for i in range(nmask)]
# These are the pixels that were originally masked
spec_omask, spat_omask = np.where((inmask == False) & (thismask == True))
nomask = len(spec_omask)
# note: must cast numpy floats to regular python floats to pass the remote interface
points_omask = [dict(type='point', args=(float(spat_omask[i]), float(spec_omask[i]), 2),
kwargs=dict(style='plus', color='cyan')) for i in range(nomask)]
# Labels for the points
text_mask = [dict(type='text', args=(nspat / 2, nspec / 2, 'masked by extraction'),
kwargs=dict(color='red', fontsize=20))]
text_omask = [dict(type='text', args=(nspat / 2, nspec / 2 + 30, 'masked initially'),
kwargs=dict(color='cyan', fontsize=20))]
canvas_list = points_mask + points_omask + text_mask + text_omask
canvas.add('constructedcanvas', canvas_list)
return skyimage[thismask], objimage[thismask], modelivar[thismask], outmask[thismask]
def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg,
global_sky, left, right,
slitmask, sobjs, order_vec, spat_pix=None,
fit_fwhm=False,
min_snr=2.0, bsp=0.6, trim_edg=(3,3), std=False, prof_nsigma=None,
niter=4, sigrej=3.5, bkpts_optimal=True,
force_gauss=False, sn_gauss=4.0, model_full_slit=False,
model_noise=True, debug_bkpts=False, show_profile=False,
show_resids=False, show_fwhm=False, adderr=0.01, base_var=None,
count_scale=None):
"""
Perform local sky subtraction, profile fitting, and optimal extraction slit by slit
Wrapper to :func:`local_skysub_extract` for echelle data: loops over the orders in descending
order of the S/N of the brightest object, assigning FWHM values for low S/N objects from the
better-determined orders.
Parameters
----------
sciimg : `numpy.ndarray`_
science image, usually with a global sky subtracted.
shape = (nspec, nspat)
sciivar : `numpy.ndarray`_
inverse variance of science image.
shape = (nspec, nspat)
fullmask : :class:`~pypeit.images.imagebitmask.ImageBitMaskArray`
Image bitmask array.
tilts : `numpy.ndarray`_
spectral tilts.
shape=(nspec, nspat)
waveimg : `numpy.ndarray`_
2-d wavelength map
global_sky : `numpy.ndarray`_
Global sky model produced by global_skysub
left : `numpy.ndarray`_
Spatial-pixel coordinates for the left edges of each
order.
right : `numpy.ndarray`_
Spatial-pixel coordinates for the right edges of each
order.
slitmask : `numpy.ndarray`_
Image identifying the 0-indexed order associated with
each pixel. Pixels with -1 are not associated with any
order.
sobjs : :class:`~pypeit.specobjs.SpecObjs` object
Object containing the information about the objects found on the
slit/order from objfind or ech_objfind
order_vec: `numpy.ndarray`_
Vector of order numbers
spat_pix: `numpy.ndarray`_, optional
Image containing the spatial location of pixels. If not
input, it will be computed from ``spat_img =
np.outer(np.ones(nspec), np.arange(nspat))``. This option
should generally not be used unless one is extracting 2d
coadds for which a rectified image contains sub-pixel
spatial information.
shape (nspec, nspat)
fit_fwhm: bool, optional
if True, perform a fit to the FWHM of the object profiles
to use for non-detected sources
min_snr: float, optional
Minimum S/N ratio. On orders where an object's S/N is at or below this
threshold (and at least three other orders have a measured FWHM), the object
FWHM is not fit directly but is instead taken from the other orders or from
the brightest object.
bsp : float, default = 0.6
Break point spacing in pixels for the b-spline sky subtraction.
trim_edg : tuple of ints of floats, default = (3,3)
Number of pixels to be ignored on the (left,right) edges of
the slit in object/sky model fits.
std : bool, default = False
This should be set to True if the object being extracted is
a standard star so that the reduction parameters can be
adjusted accordingly.
prof_nsigma : int or float, default = None
Number of sigmas that the object profile will be fit, i.e.
the region extending from -prof_nsigma to +prof_nsigma will
be fit where sigma = FWHM/2.35. This option should only be
used for bright, large, extended sources with tails in their
light profile like elliptical galaxies. If prof_nsigma is
set then the profiles will no longer be apodized by an
exponential at large distances from the trace.
niter : int, optional
Number of iterations for successive profile fitting and local sky-subtraction
sigrej : :obj:`float`, optional
Outlier rejection threshold for sky and object fitting
Set by par['scienceimage']['skysub']['sky_sigrej']
bkpts_optimal : bool, optional
Parameter governing whether spectral direction breakpoints
for b-spline sky/object modeling are determined optimally.
If ``bkpts_optimal=True``, the optimal break-point spacing
will be determined directly using the optimal_bkpts function
by measuring how well we are sampling the sky using ``piximg
= (nspec-1)*tilts``. The bsp parameter in this case
corresponds to the minimum distance between breakpoints
which we allow. If ``bkpts_optimal = False``, the
break-points will be chosen to have a uniform spacing in
pixel units set by the bsp parameter, i.e. using the
bkspace functionality of the bspline class::
bset = bspline.bspline(piximg_values, nord=4, bkspace=bsp)
fullbkpt = bset.breakpoints
force_gauss : bool, default = False
If True, a Gaussian profile will always be assumed for the
optimal extraction using the FWHM determined from object finding (or provided by the user) for the spatial
profile.
sn_gauss : int or float, default = 4.0
The signal to noise threshold above which optimal extraction
with non-parametric b-spline fits to the objects spatial
profile will be performed. For objects with median S/N <
sn_gauss, a Gaussian profile will simply be assumed because
there is not enough S/N to justify performing a more
complicated fit.
model_full_slit : bool, default = False
Set the maskwidth of the objects to be equal to the slit
width/2 such that the entire slit will be modeled by the
local sky subtraction. This mode is recommended for echelle
spectra with reasonably narrow slits.
model_noise : bool, default = True
If True, construct and iteratively update a model inverse variance image
using :func:`~pypeit.core.procimg.variance_model`. Construction of the
model variance *requires* ``base_var``, and will use the provided values
or defaults for the remaining
:func:`~pypeit.core.procimg.variance_model` parameters. If False, a
variance model will not be created and instead the input sciivar will
always be taken to be the inverse variance. Note that in order for the
noise model to make any sense one needs to be subtracting the sky and
*not* the sky residuals. In other words, for near-IR reductions where
difference imaging has been performed and this algorithm is used to fit
out the sky residuals (but not the sky itself) one should definitely set
model_noise=False since otherwise the code will attempt to create a
noise model using sky residuals instead of the sky, which is incorrect
(does not have the right count levels). In principle this could be
improved if the user could pass in a model of what the sky is for
near-IR difference imaging + residual subtraction
debug_bkpts : bool, default = False
Make an interactive plot to the screen to indicate how the breakpoints
are being chosen (passed through to :func:`local_skysub_extract`).
show_profile : bool, default=False
Show QA for the object profile fitting to the screen. Note
that this will show interactive matplotlib plots which will
block the execution of the code until the window is closed.
show_resids : bool, optional
Show the model fits and residuals.
show_fwhm : bool, default = False
If True, show a QA plot of the FWHM fit across orders when the FWHM
of a low S/N object is taken from the other orders.
adderr : float, default = 0.01
Error floor. The quantity adderr**2*sciframe**2 is added to the variance
to ensure that the S/N is never > 1/adderr, effectively setting a floor
on the noise or a ceiling on the S/N. This is one of the components
needed to construct the model variance (this is the same as
``noise_floor`` in :func:`~pypeit.core.procimg.variance_model`); see
``model_noise``.
base_var : `numpy.ndarray`_, shape is (nspec, nspat), optional
The "base-level" variance in the data, set by the detector properties and
the image processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `numpy.ndarray`_, optional
A scale factor that *has already been applied* to the provided science
image. It accounts for the number of frames contributing to
the provided counts, and the relative throughput factors that can be measured
from flat-field frames. For example, if the image has been flat-field corrected,
this is the inverse of the flat-field counts. If None, set to 1. If a single
float, assumed to be constant across the full image. If an array, the
shape must match ``base_var``. The variance will be 0 wherever this
array is not positive, modulo the provided ``adderr``. This is one of
the components needed to construct the model variance; see
``model_noise``.
Returns
-------
skymodel : `numpy.ndarray`_
Model sky flux for the full image, shape = (nspec, nspat).
objmodel : `numpy.ndarray`_
Model object flux for the full image, shape = (nspec, nspat).
ivarmodel : `numpy.ndarray`_
Model inverse variance for the full image, shape = (nspec, nspat).
outmask : :class:`~pypeit.images.imagebitmask.ImageBitMaskArray`
Copy of ``fullmask`` with the EXTRACT flag additionally set for pixels
rejected during extraction.
sobjs : :class:`~pypeit.specobjs.SpecObjs` object
Same object as passed in, updated with the extraction results
"""
# Allocate the images that are needed
# Initialize to mask in case no objects were found
outmask = fullmask.copy()
extractmask = fullmask.flagged(invert=True)
# TODO case of no objects found should be properly dealt with by local_skysub_extract
# Initialize to zero in case no objects were found
objmodel = np.zeros_like(sciimg)
# Set initially to global sky in case no objects were found
skymodel = np.copy(global_sky)
# Set initially to sciivar in case no objects were found.
ivarmodel = np.copy(sciivar)
sobjs = sobjs.copy()
norders = order_vec.size
slit_vec = np.arange(norders)
# Find the spat IDs
gdslit_spat = np.unique(slitmask[slitmask >= 0]).astype(int) # Unique sorts
#if gdslit_spat.size != norders:
# msgs.error("You have not dealt with masked orders properly")
#if (np.sum(sobjs.sign > 0) % norders) == 0:
# nobjs = int((np.sum(sobjs.sign > 0)/norders))
#else:
# msgs.error('Number of specobjs in sobjs is not an integer multiple of the number or ordres!')
# Set bad obj to -nan
uni_objid = np.unique(sobjs[sobjs.sign > 0].ECH_OBJID)
nobjs = len(uni_objid)
order_snr = np.zeros((norders, nobjs))
order_snr_gpm = np.ones_like(order_snr)
for iord in range(norders):
for iobj in range(nobjs):
ind = (sobjs.ECH_ORDERINDX == iord) & (sobjs.ECH_OBJID == uni_objid[iobj])
# Allow for missed/bad order
if np.sum(ind) == 0:
order_snr_gpm[iord,iobj] = False
else:
order_snr[iord,iobj] = sobjs[ind].ech_snr
# Compute the average SNR and find the brightest object
snr_bar = np.sum(order_snr,axis=0) / np.sum(order_snr_gpm,axis=0)
srt_obj = snr_bar.argsort()[::-1]
ibright = srt_obj[0] # index of the brightest object
# Now extract the orders in descending order of S/N for the brightest object
srt_order_snr = order_snr[:,ibright].argsort()[::-1]
fwhm_here = np.zeros(norders)
fwhm_was_fit = np.zeros(norders,dtype=bool)
# Print out a status message
str_out = ''
for iord in srt_order_snr:
if order_snr_gpm[iord,ibright]:
str_out += '{:<8d}{:<8d}{:>10.2f}'.format(slit_vec[iord], order_vec[iord], order_snr[iord,ibright]) + msgs.newline()
dash = '-'*27
dash_big = '-'*40
msgs.info(msgs.newline() + 'Reducing orders in order of S/N of brightest object:' + msgs.newline() + dash +
msgs.newline() + '{:<8s}{:<8s}{:>10s}'.format('slit','order','S/N') + msgs.newline() + dash +
msgs.newline() + str_out)
# Loop over orders in order of S/N ratio (from highest to lowest) for the brightest object
for iord in srt_order_snr:
# Is this a bad slit?
if not np.any(order_snr_gpm[iord,:]):
continue
order = order_vec[iord]
msgs.info("Local sky subtraction and extraction for slit/order: {:d}/{:d}".format(iord,order))
other_orders = (fwhm_here > 0) & np.invert(fwhm_was_fit)
other_fit = (fwhm_here > 0) & fwhm_was_fit
# Loop over objects in order of S/N ratio (from highest to lowest)
for iobj in srt_obj:
if (order_snr[iord, iobj] <= min_snr) & (np.sum(other_orders) >= 3):
if iobj == ibright:
# If this is the brightest object then we extrapolate the FWHM from a fit
#fwhm_coeffs = np.polyfit(order_vec[other_orders], fwhm_here[other_orders], 1)
#fwhm_fit_eval = np.poly1d(fwhm_coeffs)
#fwhm_fit = fwhm_fit_eval(order_vec[iord])
fwhm_was_fit[iord] = True
# Either perform a linear fit to the FWHM or simply take the median
if fit_fwhm:
minx = 0.0
maxx = fwhm_here[other_orders].max()
# ToDO robust_poly_fit needs to return minv and maxv as outputs for the fits to be usable downstream
#fit_mask, fwhm_coeffs = fitting.robust_fit(order_vec[other_orders], fwhm_here[other_orders],1,
pypeitFit = fitting.robust_fit(order_vec[other_orders], fwhm_here[other_orders],1,
function='polynomial',maxiter=25,lower=2.0, upper=2.0,
maxrej=1,sticky=False, minx=minx, maxx=maxx)
fwhm_this_ord = pypeitFit.eval(order_vec[iord])#, 'polynomial', minx=minx, maxx=maxx)
fwhm_all = pypeitFit.eval(order_vec)#, 'polynomial', minx=minx, maxx=maxx)
fwhm_str = 'linear fit'
else:
fit_mask = np.ones_like(order_vec[other_orders],dtype=bool)
fwhm_this_ord = np.median(fwhm_here[other_orders])
fwhm_all = np.full(norders,fwhm_this_ord)
fwhm_str = 'median '
indx = (sobjs.ECH_OBJID == uni_objid[iobj]) & (sobjs.ECH_ORDERINDX == iord)
for spec in sobjs[indx]:
spec.FWHM = fwhm_this_ord
str_out = ''
for slit_now, order_now, snr_now, fwhm_now in zip(
slit_vec[other_orders], order_vec[other_orders],
order_snr[other_orders,ibright],
fwhm_here[other_orders]):
str_out += '{:<8d}{:<8d}{:>10.2f}{:>10.2f}'.format(slit_now, order_now, snr_now, fwhm_now) + msgs.newline()
msgs.info(msgs.newline() + 'Using' + fwhm_str + ' for FWHM of object={:d}'.format(uni_objid[iobj]) +
' on slit/order: {:d}/{:d}'.format(iord,order) + msgs.newline() + dash_big +
msgs.newline() + '{:<8s}{:<8s}{:>10s}{:>10s}'.format('slit', 'order','SNR','FWHM') +
msgs.newline() + dash_big +
msgs.newline() + str_out[:-8] +
fwhm_str.upper() + ':{:<8d}{:<8d}{:>10.2f}{:>10.2f}'.format(iord, order, order_snr[iord,ibright], fwhm_this_ord) +
msgs.newline() + dash_big)
if show_fwhm:
plt.plot(order_vec[other_orders][fit_mask], fwhm_here[other_orders][fit_mask], marker='o', linestyle=' ',
color='k', mfc='k', markersize=4.0, label='orders informing fit')
if np.any(np.invert(fit_mask)):
plt.plot(order_vec[other_orders][np.invert(fit_mask)],
fwhm_here[other_orders][np.invert(fit_mask)], marker='o', linestyle=' ',
color='magenta', mfc='magenta', markersize=4.0, label='orders rejected by fit')
if np.any(other_fit):
plt.plot(order_vec[other_fit], fwhm_here[other_fit], marker='o', linestyle=' ',
color='lawngreen', mfc='lawngreen',markersize=4.0, label='fits to other low SNR orders')
plt.plot([order_vec[iord]], [fwhm_this_ord], marker='o', linestyle=' ',color='red', mfc='red', markersize=6.0,label='this order')
plt.plot(order_vec, fwhm_all, color='cornflowerblue', zorder=10, linewidth=2.0, label=fwhm_str)
plt.legend()
plt.show()
else:
# If this is not the brightest object then assign it the FWHM of the brightest object
indx = np.where((sobjs.ECH_OBJID == uni_objid[iobj]) & (sobjs.ECH_ORDERINDX == iord))[0][0]
indx_bri = np.where((sobjs.ECH_OBJID == uni_objid[ibright]) & (sobjs.ECH_ORDERINDX == iord))[0][0]
spec = sobjs[indx]
spec.FWHM = sobjs[indx_bri].FWHM
thisobj = (sobjs.ECH_ORDERINDX == iord) # indices of objects for this slit
thismask = slitmask == gdslit_spat[iord] # pixels for this slit
# True = Good, False = Bad for inmask
inmask = fullmask.flagged(invert=True) & thismask
# Local sky subtraction and extraction
skymodel[thismask], objmodel[thismask], ivarmodel[thismask], extractmask[thismask] \
= local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask,
left[:,iord], right[:,iord], sobjs[thisobj],
spat_pix=spat_pix, ingpm=inmask, std=std, bsp=bsp,
trim_edg=trim_edg, prof_nsigma=prof_nsigma, niter=niter,
sigrej=sigrej,
bkpts_optimal=bkpts_optimal, force_gauss=force_gauss,
sn_gauss=sn_gauss, model_full_slit=model_full_slit,
model_noise=model_noise, debug_bkpts=debug_bkpts,
show_resids=show_resids, show_profile=show_profile,
adderr=adderr, base_var=base_var, count_scale=count_scale)
# update the FWHM fitting vector for the brightest object
indx = (sobjs.ECH_OBJID == uni_objid[ibright]) & (sobjs.ECH_ORDERINDX == iord)
fwhm_here[iord] = np.median(sobjs[indx].FWHMFIT)
# Did the FWHM get updated by the profile fitting routine in local_skysub_extract? If so, include this value
# for future fits
if np.abs(fwhm_here[iord] - sobjs[indx].FWHM) >= 0.01:
fwhm_was_fit[iord] = False
# Set the bit for pixels which were masked by the extraction.
# For extractmask, True = Good, False = Bad
iextract = fullmask.flagged(invert=True) & np.logical_not(extractmask)
# Undefined inverse variances
outmask.turn_on('EXTRACT', select=iextract)
# Return
return skymodel, objmodel, ivarmodel, outmask, sobjs
def read_userregions(skyreg, nslits, maxslitlength):
"""
Parse the sky regions defined by the user. The text should be a comma
separated list of percentages to apply to all slits.
Example
-------
The string ``':10,35:65,80:'`` would select (in all slits):
- the leftmost 10% of the slit length,
- the inner 30% (from 35-65% of the slit length), and
- the final 20% of the slit length (from 80-100% of the slit length)
Parameters
----------
skyreg : str
The sky region definition.
nslits : int
Number of slits on the detector
maxslitlength: float
The maximum slit length (in pixels).
Returns
-------
status : int
Status of the region parsing (0 = success; 1 = parse failure;
2 = a region definition was missing a colon)
regions : list
A list of size nslits. Each element contains a boolean `numpy.ndarray`_
of size ``int(10*maxslitlength)`` (the internal resolution of the region
boundaries). A True value indicates a pixel that is part of the sky region.
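Examples
--------
Illustrative call; the counts follow directly from the percentage regions described above (the internal resolution here is 10 * maxslitlength = 1000)::
>>> status, regions = read_userregions(":10,35:65,80:", nslits=2, maxslitlength=100.)
>>> status
0
>>> len(regions), regions[0].size
(2, 1000)
>>> int(regions[0].sum())
600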
"""
# Define the resolution of the sky region boundary to be at least a tenth of a pixel
resolution = int(10.0 * maxslitlength)
status = 0
regions = []
try:
skyreg = skyreg.split(",")
for tt in skyreg:
if ":" not in tt:
# Poor region definition - it should contain a colon
status = 2
break
tts = tt.split(":")
regions.append([0 if len(tts[0]) == 0 else int(
round((resolution - 1) * float(tts[0]) / 100.0)),
resolution if len(tts[1]) == 0 else int(
round((resolution - 1) * float(tts[1]) / 100.0))
])
# Initialise the sky regions - For each slit, generate a mask of size `resolution`.
# i.e. the spatial coordinate is sampled by `resolution` elements.
skyreg = [np.zeros(resolution, dtype=bool) for _ in range(nslits)]
# For all regions, set the skyreg mask to True for each region
for reg in regions:
# Do some checks
xmin, xmax = reg[0], reg[1]
if xmax < xmin:
xmin, xmax = xmax, xmin
if xmin < 0:
xmin = 0
if xmax > resolution:
xmax = resolution
# Apply to all slits
for sl in range(nslits):
skyreg[sl][xmin:xmax] = True
except Exception:
status = 1
# Return
return status, skyreg
def generate_mask(pypeline, skyreg, slits, slits_left, slits_right, spat_flexure=None):
"""Generate the mask of sky regions
Parameters
----------
pypeline : str
Name of the pypeline being used (e.g. MultiSlit, Echelle, IFU, ...)
skyreg : list
A list of size nslits. Each element contains a numpy array (dtype=bool)
where a True value indicates a value that is part of the sky region.
slits : :class:`SlitTraceSet`
Data container with slit trace information
slits_left : `numpy.ndarray`_
A 2D array containing the pixel coordinates of the left slit edges
slits_right : `numpy.ndarray`_
A 2D array containing the pixel coordinates of the right slit edges
spat_flexure : float, optional
If provided, the spatial flexure shift applied when generating the
slit image of the sky regions (passed to ``slit_img``).
Returns
-------
mask : `numpy.ndarray`_
Boolean mask containing sky regions
"""
# Grab the resolution that was used to generate skyreg
resolution = skyreg[0].size
# Using the left/right slit edge traces, generate a series of traces that mark the
# sky region boundaries in each slit.
nreg = 0
# Initialise the sky region traces (this contains *all* sky regions,
# regardless of which slit the sky regions falls in)
left_edg, righ_edg = np.zeros((slits.nspec, 0)), np.zeros((slits.nspec, 0))
spec_min, spec_max = np.array([]), np.array([])
for sl in range(slits.nslits):
# Calculate the slit width
diff = slits_right[:, sl] - slits_left[:, sl]
# Break up the slit into `resolution` subpixels
tmp = np.zeros(resolution+2)
tmp[1:-1] = skyreg[sl]
# Find all the left and right sky region traces in this slit
wl = np.where(tmp[1:] > tmp[:-1])[0]
wr = np.where(tmp[1:] < tmp[:-1])[0]
        # Construct the left/right traces, and store them in the left_edg, righ_edg arrays.
for rr in range(wl.size):
left = slits_left[:, sl] + wl[rr]*diff/(resolution-1.0)
righ = slits_left[:, sl] + wr[rr]*diff/(resolution-1.0)
left_edg = np.append(left_edg, left[:, np.newaxis], axis=1)
righ_edg = np.append(righ_edg, righ[:, np.newaxis], axis=1)
nreg += 1
spec_min = np.append(spec_min, slits.specmin[sl])
spec_max = np.append(spec_max, slits.specmax[sl])
# Now that we have sky region traces, utilise the SlitTraceSet to define the regions.
# We will then use the slit_img task to create a mask of the sky regions.
# TODO: I don't understand why slmsk needs to be instantiated. SlitTraceSet
# does this internally.
slmsk = np.zeros(left_edg.shape[1], dtype=slittrace.SlitTraceSet.bitmask.minimum_dtype())
slitreg = slittrace.SlitTraceSet(left_edg, righ_edg, pypeline, nspec=slits.nspec,
nspat=slits.nspat, mask=slmsk, specmin=spec_min,
specmax=spec_max, binspec=slits.binspec,
binspat=slits.binspat, pad=0)
# Generate the mask, and return
return (slitreg.slit_img(use_spatial=False, flexure=spat_flexure) >= 0).astype(bool)
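# Illustrative chaining of the two routines above (sketch; `slits`, `slits_left`, and
# `slits_right` are assumed to come from an existing SlitTraceSet and its edge traces):
#   status, regions = read_userregions(":10,80:", slits.nslits, max_slit_length)
#   if status == 0:
#       skymask = generate_mask("MultiSlit", regions, slits, slits_left, slits_right)
#   # skymask is a boolean image that is True wherever a user-defined sky region falls.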
|
313ae7fddb01eaa6434e21e224e8abfad36163a3
|
aae3c6fccb2296e4da5bb10310f5dd6baba8b7de
|
/activitysim/cli/test/test_cli.py
|
b8f0fc3842a09c2b3eeda7ba66fba5dfa688ed12
|
[
"BSD-3-Clause"
] |
permissive
|
ActivitySim/activitysim
|
3d938e616452be76db1bb0c8a1212e12b9216823
|
a8e755f96d0e32633a6d3657c4878e3b6a37e59a
|
refs/heads/main
| 2023-08-08T16:02:06.275693
| 2023-05-09T13:08:23
| 2023-05-09T13:08:23
| 20,981,950
| 118
| 89
|
BSD-3-Clause
| 2023-07-25T14:07:16
| 2014-06-18T23:57:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
test_cli.py
|
# ActivitySim
# See full license in LICENSE.txt.
import os
import shutil
import subprocess
import sys
import pytest
if sys.version_info < (3, 7):
pytest.skip("capture_output introduced in Python 3.7", allow_module_level=True)
def test_help():
# cp = completed process
cp = subprocess.run(["activitysim", "-h"], capture_output=True)
assert "usage: activitysim [-h] [--version]" in str(cp.stdout)
def test_create_help():
cp = subprocess.run(["activitysim", "create", "-h"], capture_output=True)
assert "usage: activitysim create [-h] (-l | -e PATH) [-d PATH]" in str(cp.stdout)
def test_create_list():
cp = subprocess.run(["activitysim", "create", "--list"], capture_output=True)
assert "Available examples" in str(cp.stdout)
assert "name: prototype_mtc" in str(cp.stdout)
def test_create_copy():
target = os.path.join(os.path.dirname(__file__), "test_example")
cp = subprocess.run(
[
"activitysim",
"create",
"--example",
"prototype_mtc",
"--destination",
target,
],
capture_output=True,
)
assert "copying data ..." in str(cp.stdout)
assert "copying configs ..." in str(cp.stdout)
assert "copying configs_mp ..." in str(cp.stdout)
assert "copying output ..." in str(cp.stdout)
# replace slashes on windows
assert str(target).replace("\\\\", "\\") in str(cp.stdout).replace("\\\\", "\\")
assert os.path.exists(target)
for folder in ["configs", "configs_mp", "data", "output"]:
assert os.path.isdir(os.path.join(target, folder))
# clean up
shutil.rmtree(target)
assert not os.path.exists(target)
def test_run():
cp = subprocess.run(["activitysim", "run"], capture_output=True)
msg = (
"please specify either a --working_dir "
"containing 'configs', 'data', and 'output' "
"folders or all three of --config, --data, and --output"
)
# expect error
assert msg in str(cp.stderr)
if __name__ == "__main__":
test_help()
test_create_help()
test_create_list()
test_create_copy()
test_run()
|
59ca4b05ab5ef1af209d2961972061ac00e45b1c
|
a935ec8dab61675b6a3e348511fb9f8c5d490026
|
/docs/conf.py
|
4a6b9aba5fe70c8fef13f13ae1b51c0d022f79e5
|
[
"MIT"
] |
permissive
|
libAudioFlux/audioFlux
|
d604c54941c17d90e1ef42f04ba353ef6e244926
|
3ae59434e2c1cacb6da43562677ed4899d7047c7
|
refs/heads/master
| 2023-04-28T05:17:22.649299
| 2023-04-25T09:45:59
| 2023-04-25T09:45:59
| 589,514,195
| 1,701
| 85
|
MIT
| 2023-04-25T09:00:05
| 2023-01-16T09:53:04
|
C
|
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
conf.py
|
import os
import sys
sys.path.insert(0, os.path.abspath("../python")) # NOQA
import audioflux
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'AudioFlux'
copyright = '2023, AudioFluxLib'
author = 'AudioFlux'
version = release = audioflux.__version__
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"sphinx_rtd_theme",
"numpydoc",
"matplotlib.sphinxext.plot_directive",
]
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
plot_formats = [("png", 100)]
numpydoc_use_plots = True
numpydoc_show_class_members = True
numpydoc_class_members_toctree = False
# mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"
mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
default_role = "autolink"
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
language = 'en'
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_favicon = '../image/icon.png'
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
]
html_js_files = [
'js/custom.js',
]
html_context = {
"display_github": True, # Integrate GitHub
"github_repo": "libAudioFlux/audioflux", # Repo name
"github_version": "master", # Version
"conf_py_path": "/docs/", # Path in the checkout to the docs root
"switcher": {
"json_url": "https://audioflux.top/_static/versions.json"
}
}
html_theme_options = {
'analytics_id': 'G-PJ9LYQR6FG',
'analytics_anonymize_ip': True,
}
autodoc_member_order = 'bysource'
autosectionlabel_prefix_document = True
autoclass_content = 'class'
|
1967feb178c5425662dbf8b32faecbfbf72a1652
|
9803232b04daa00eb4038be338b833907fd1625f
|
/blender_bindings/source1/bsp/entities/abstract_entity_handlers.py
|
18e70fcf0abfe502233052fbed61f10da9e86902
|
[
"MIT"
] |
permissive
|
REDxEYE/SourceIO
|
a0ff3cff37504afdb906e4ee20c1077a8daf2912
|
85661fe057cef1ad2a779a9d48e810ea214f4f07
|
refs/heads/master
| 2023-08-08T18:35:28.771447
| 2023-08-07T22:26:59
| 2023-08-07T22:26:59
| 170,197,673
| 409
| 53
|
MIT
| 2023-08-23T18:40:38
| 2019-02-11T20:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 15,529
|
py
|
abstract_entity_handlers.py
|
import math
import re
from pathlib import Path
from pprint import pformat
from typing import List
import bpy
import numpy as np
from mathutils import Euler
from ....operators.import_settings_base import BSPSettings
from .....library.shared.content_providers.content_manager import \
ContentManager
from .....library.source1.bsp.bsp_file import BSPFile
from .....library.source1.bsp.datatypes.face import Face
from .....library.source1.bsp.datatypes.model import Model
from .....library.source1.bsp.datatypes.texture_data import TextureData
from .....library.source1.bsp.datatypes.texture_info import TextureInfo
from .....library.source1.vmt import VMT
from .....library.utils.math_utilities import SOURCE1_HAMMER_UNIT_TO_METERS
from .....logger import SLoggingManager
from ....utils.utils import add_material, get_or_create_collection
from ...vtf import import_texture
from .base_entity_classes import *
strip_patch_coordinates = re.compile(r"_-?\d+_-?\d+_-?\d+.*$")
log_manager = SLoggingManager()
def gather_vertex_ids(model: Model, faces: List[Face], surf_edges: np.ndarray, edges: np.ndarray):
vertex_offset = 0
material_ids = []
vertex_count = 0
for map_face in faces[model.first_face:model.first_face + model.face_count]:
vertex_count += map_face.edge_count
vertex_ids = np.zeros(vertex_count, dtype=np.uint16)
for map_face in faces[model.first_face:model.first_face + model.face_count]:
if map_face.disp_info_id != -1:
continue
first_edge = map_face.first_edge
edge_count = map_face.edge_count
material_ids.append(map_face.tex_info_id)
used_surf_edges = surf_edges[first_edge:first_edge + edge_count]
reverse = np.subtract(1, (used_surf_edges > 0).astype(np.uint8))
used_edges = edges[np.abs(used_surf_edges)]
tmp = np.arange(len(used_edges))
face_vertex_ids = used_edges[tmp, reverse]
vertex_ids[vertex_offset:vertex_offset + edge_count] = face_vertex_ids
vertex_offset += edge_count
return vertex_ids, material_ids
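# Note (sketch): Source BSP surfedges are signed indices into the edge lump -- a
# positive value walks the stored edge in order, a negative value walks it reversed.
# The `reverse` array above picks the matching column of each edge pair so the correct
# start vertex is used for every face edge.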
def _srgb2lin(s: float) -> float:
if s <= 0.0404482362771082:
lin = s / 12.92
else:
lin = pow(((s + 0.055) / 1.055), 2.4)
return lin
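# Example (sketch): _srgb2lin(0.5) ~= 0.214, i.e. sRGB mid-grey corresponds to roughly
# 21.4% linear intensity; inputs at or below 0.0404482362771082 use the linear branch
# s / 12.92 instead of the power curve.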
class AbstractEntityHandler:
entity_lookup_table = {}
def __init__(self, bsp_file: BSPFile, parent_collection,
world_scale: float = SOURCE1_HAMMER_UNIT_TO_METERS, light_scale: float = 1.0):
self.logger = log_manager.get_logger(self.__class__.__name__)
self._bsp: BSPFile = bsp_file
self.scale = world_scale
self.light_scale = light_scale
self.parent_collection = parent_collection
self._entites = self._bsp.get_lump('LUMP_ENTITIES').entities
self._handled_paths = []
self._entity_by_name_cache = {}
def load_entities(self, settings: BSPSettings):
entity_lump = self._bsp.get_lump('LUMP_ENTITIES')
for entity_data in entity_lump.entities:
entity_class: str = entity_data['classname']
if entity_class.startswith("info_") and not settings.load_info:
continue
elif "decal" in entity_class and not settings.load_decals:
continue
elif "light" in entity_class and not settings.load_lights:
continue
elif entity_class.startswith("trigger_") and not settings.load_triggers:
continue
elif entity_class.startswith("prop_") and not settings.load_props:
continue
elif entity_class.startswith("logic_") and not settings.load_logic:
continue
elif entity_class.endswith("rope") and not settings.load_ropes:
continue
if not self.handle_entity(entity_data):
self.logger.warn(pformat(entity_data))
bpy.context.view_layer.update()
# for entity_data in entity_lump.entities:
# self.resolve_parents(entity_data)
pass
def handle_entity(self, entity_data: dict):
entity_class = entity_data['classname']
if hasattr(self, f'handle_{entity_class}') and entity_class in self.entity_lookup_table:
entity_class_obj = self._get_class(entity_class)
entity_object = entity_class_obj(entity_data)
handler_function = getattr(self, f'handle_{entity_class}')
try:
handler_function(entity_object, entity_data)
except ValueError as e:
import traceback
self.logger.error(f'Exception during handling {entity_class} entity: {e.__class__.__name__}("{e}")')
self.logger.error(traceback.format_exc())
return False
return True
return False
def _get_entity_by_name(self, name):
if not self._entity_by_name_cache:
self._entity_by_name_cache = {e['targetname']: e for e in self._entites if 'targetname' in e}
entity = self._entity_by_name_cache.get(name, None)
if entity is None:
return None, None
entity_class = self._get_class(entity['classname'])
entity_obj = entity_class(entity)
return entity_obj, entity
def _get_string(self, string_id):
strings: List[str] = self._bsp.get_lump('LUMP_TEXDATA_STRING_TABLE').strings
return strings[string_id] or "NO_NAME"
def _load_brush_model(self, model_id, model_name):
model = self._bsp.get_lump("LUMP_MODELS").models[model_id]
mesh_obj = bpy.data.objects.new(model_name, bpy.data.meshes.new(f"{model_name}_MESH"))
mesh_data = mesh_obj.data
faces = []
material_indices = []
bsp_surf_edges: np.ndarray = self._bsp.get_lump('LUMP_SURFEDGES').surf_edges
bsp_vertices: np.ndarray = self._bsp.get_lump('LUMP_VERTICES').vertices
bsp_edges: np.ndarray = self._bsp.get_lump('LUMP_EDGES').edges
bsp_faces: List[Face] = self._bsp.get_lump('LUMP_FACES').faces
bsp_textures_info: List[TextureInfo] = self._bsp.get_lump('LUMP_TEXINFO').texture_info
bsp_textures_data: List[TextureData] = self._bsp.get_lump('LUMP_TEXDATA').texture_data
vertex_ids, material_ids = gather_vertex_ids(model, bsp_faces, bsp_surf_edges, bsp_edges)
unique_vertex_ids = np.unique(vertex_ids)
tmp2 = np.searchsorted(unique_vertex_ids, vertex_ids)
remapped = dict(zip(vertex_ids, tmp2))
material_lookup_table = {}
for texture_info in sorted(set(material_ids)):
texture_info = bsp_textures_info[texture_info]
texture_data = bsp_textures_data[texture_info.texture_data_id]
material_name = self._get_string(texture_data.name_id)
material_name = strip_patch_coordinates.sub("", material_name)[-63:]
material_lookup_table[texture_data.name_id] = add_material(material_name, mesh_obj)
uvs_per_face = []
luvs_per_face = []
for map_face in bsp_faces[model.first_face:model.first_face + model.face_count]:
if map_face.disp_info_id != -1:
continue
uvs = {}
luvs = {}
face = []
first_edge = map_face.first_edge
edge_count = map_face.edge_count
texture_info = bsp_textures_info[map_face.tex_info_id]
texture_data = bsp_textures_data[texture_info.texture_data_id]
tv1, tv2 = texture_info.texture_vectors
lv1, lv2 = texture_info.lightmap_vectors
used_surf_edges = bsp_surf_edges[first_edge:first_edge + edge_count]
reverse = np.subtract(1, (used_surf_edges > 0).astype(np.uint8))
used_edges = bsp_edges[np.abs(used_surf_edges)]
tmp = np.arange(len(used_edges))
face_vertex_ids = used_edges[tmp, reverse]
uv_vertices = bsp_vertices[face_vertex_ids]
u = (np.dot(uv_vertices, tv1[:3]) + tv1[3]) / (texture_data.width or 512)
v = 1 - ((np.dot(uv_vertices, tv2[:3]) + tv2[3]) / (texture_data.height or 512))
lu = (np.dot(uv_vertices, lv1[:3]) + lv1[3]) / (texture_data.width or 512)
lv = 1 - ((np.dot(uv_vertices, lv2[:3]) + lv2[3]) / (texture_data.height or 512))
v_uvs = np.dstack([u, v]).reshape((-1, 2))
l_uvs = np.dstack([lu, lv]).reshape((-1, 2))
for vertex_id, uv, luv in zip(face_vertex_ids, v_uvs, l_uvs):
new_vertex_id = remapped[vertex_id]
face.append(new_vertex_id)
uvs[new_vertex_id] = uv
luvs[new_vertex_id] = luv
material_indices.append(material_lookup_table[texture_data.name_id])
uvs_per_face.append(uvs)
luvs_per_face.append(luvs)
faces.append(face[::-1])
mesh_data.from_pydata(bsp_vertices[unique_vertex_ids] * self.scale, [], faces)
mesh_data.polygons.foreach_set('material_index', material_indices)
main_uv = mesh_data.uv_layers.new()
uv_data = main_uv.data
for poly in mesh_data.polygons:
for loop_index in range(poly.loop_start, poly.loop_start + poly.loop_total):
uv_data[loop_index].uv = uvs_per_face[poly.index][mesh_data.loops[loop_index].vertex_index]
lightmap_uv = mesh_data.uv_layers.new(name='lightmap')
uv_data = lightmap_uv.data
for poly in mesh_data.polygons:
for loop_index in range(poly.loop_start, poly.loop_start + poly.loop_total):
uv_data[loop_index].uv = luvs_per_face[poly.index][mesh_data.loops[loop_index].vertex_index]
return mesh_obj
def _handle_brush_model(self, class_name, group, entity, entity_raw):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection(class_name, mesh_object, group)
def _set_entity_data(self, obj, entity_raw: dict):
obj['entity_data'] = entity_raw
@staticmethod
def _get_entity_name(entity: Base):
if hasattr(entity, 'targetname') and entity.targetname:
return str(entity.targetname)
else:
return f'{entity.class_name}_{entity.hammer_id}'
def _put_into_collection(self, name, obj, grouping_collection_name=None):
if grouping_collection_name is not None:
parent_collection = get_or_create_collection(grouping_collection_name, self.parent_collection)
parent_collection = get_or_create_collection(name, parent_collection)
else:
parent_collection = get_or_create_collection(name, self.parent_collection)
parent_collection.objects.link(obj)
@staticmethod
def _apply_light_rotation(obj, entity):
obj.rotation_euler = Euler((0, math.radians(-90), 0))
obj.rotation_euler.rotate(Euler((
math.radians(entity.angles[2]),
math.radians(-entity.pitch),
math.radians(entity.angles[1])
)))
def _set_location_and_scale(self, obj, location, additional_scale=1.0):
scale = self.scale * additional_scale
obj.location = location
obj.location *= scale
obj.scale *= scale
def _set_location(self, obj, location):
obj.location = location
obj.location *= self.scale
@staticmethod
def _set_rotation(obj, angles):
if len(angles) < 3:
return
obj.rotation_euler.rotate(Euler((math.radians(angles[2]),
math.radians(angles[0]),
math.radians(angles[1]))))
@staticmethod
def _set_parent_if_exist(obj, parent_name):
if parent_name is None:
return
if parent_name in bpy.data.objects:
pass
before = obj.matrix_world.copy()
obj.parent = bpy.data.objects[parent_name]
obj.matrix_world = before
def _set_icon_if_present(self, obj, entity):
icon_path = getattr(entity, 'icon_sprite', None)
if icon_path is not None:
icon = bpy.data.images.get(Path(icon_path).stem, None)
if icon is None:
icon_material_file = ContentManager().find_material(icon_path, silent=True)
if not icon_material_file:
return
vmt = VMT(icon_material_file, icon_path)
texture = ContentManager().find_texture(vmt.get_string('$basetexture', None), silent=True)
if not texture:
return
icon = import_texture(Path(Path(icon_path).stem), texture)
obj.empty_display_type = 'IMAGE'
obj.empty_display_size = (1 / self.scale)
obj.data = icon
@staticmethod
def _create_lines(name, points, closed=False):
line_data = bpy.data.curves.new(name=f'{name}_data', type='CURVE')
line_data.dimensions = '3D'
line_data.fill_mode = 'FULL'
line_data.bevel_depth = 0
polyline = line_data.splines.new('POLY')
polyline.use_cyclic_u = closed
polyline.points.add(len(points) - 1)
for idx in range(len(points)):
polyline.points[idx].co = tuple(points[idx]) + (1.0,)
line = bpy.data.objects.new(f'{name}', line_data)
line.location = [0, 0, 0]
return line
def _get_class(self, class_name) -> type(Base):
if class_name in self.entity_lookup_table:
entity_object = self.entity_lookup_table[class_name]
return entity_object
else:
return Base
def resolve_parents(self, entity_raw: dict):
entity = self._get_class(entity_raw['classname'])
entity.from_dict(entity, entity_raw)
if hasattr(entity, 'targetname') and hasattr(entity, 'parentname'):
if entity.targetname and str(entity.targetname) in bpy.data.objects:
obj = bpy.data.objects[entity.targetname]
self._set_parent_if_exist(obj, entity.parentname)
@staticmethod
def _create_empty(name):
empty = bpy.data.objects.new(name, None)
empty.empty_display_size = 16
return empty
def _handle_entity_with_model(self, entity, entity_raw: dict):
if hasattr(entity, 'model') and entity.model:
model_path = entity.model
elif hasattr(entity, 'model_') and entity.model_:
model_path = entity.model_
elif hasattr(entity, 'viewport_model') and entity.viewport_model:
model_path = entity.viewport_model
else:
model_path = 'error.mdl'
obj = self._create_empty(self._get_entity_name(entity))
properties = {'prop_path': model_path,
'type': entity.class_name,
'scale': self.scale,
'entity': entity_raw}
self._set_location_and_scale(obj, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(obj, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(obj, properties)
return obj
|
3167d0398441995e7e8664513789d61fc069e9f1
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/sparse/pagerank.py
|
516c07a13ab71155e1ff07fa1b05b3e90e464607
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
pagerank.py
|
import dgl.sparse as dglsp
import networkx as nx
import torch
N = 100
DAMP = 0.85
K = 10
def pagerank(A):
D = A.sum(0)
V = torch.ones(N) / N
for _ in range(K):
########################################################################
# (HIGHLIGHT) Take the advantage of DGL sparse APIs to calculate the
# page rank.
########################################################################
V = (1 - DAMP) / N + DAMP * A @ (V / D)
return V
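# For reference (sketch): the update above is the damped PageRank iteration
#   V <- (1 - DAMP) / N + DAMP * A @ (V / D)
# with D = A.sum(0) holding the per-node degree. Keeping A as a dglsp sparse matrix
# makes the matrix-vector product scale with the number of edges rather than N * N.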
if __name__ == "__main__":
g = nx.erdos_renyi_graph(N, 0.05, seed=10086)
# Create the adjacency matrix of graph.
edges = list(g.to_directed().edges())
indices = torch.tensor(edges).transpose(0, 1)
A = dglsp.spmatrix(indices, shape=(N, N))
V = pagerank(A)
print(V)
|
be8a263ffcb075085766845286dda0b34f30d9d3
|
36ef8b40191c13344a5b3fb6bb2ab1cfb64b83a8
|
/December-11/python_UjjwalPrahladka_Dec11.py
|
0306899586f4be8a08e3e979cb41c72aba754f0e
|
[
"MIT"
] |
permissive
|
SVCE-ACM/A-December-of-Algorithms-2019
|
a9e2436b29db8ed5e488719c6e45c78ccbd49bec
|
d15a4e8284c8576b7080c999d4b46748f4d1d09b
|
refs/heads/master
| 2023-02-03T20:45:17.211079
| 2022-08-13T07:41:13
| 2022-08-13T07:41:13
| 222,771,373
| 231
| 193
|
MIT
| 2023-06-18T04:02:23
| 2019-11-19T19:18:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
python_UjjwalPrahladka_Dec11.py
|
import re
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
email = input("Enter email: ")
if(re.search(regex,email)):
print("Valid Email")
else:
print("Invalid Email")
|
7795b3bfb9812301dd9428f857fc1918513d1020
|
0db05f7b843e8450bafd5ae23f8f70f9a9a8c151
|
/Src/StdLib/Lib/site-packages/win32/Demos/BackupRead_BackupWrite.py
|
e35ad5888e835d026a14d446db587e853f9372c4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only"
] |
permissive
|
IronLanguages/ironpython2
|
9c7f85bd8e6bca300e16f8c92f6384cecb979a6a
|
d00111890ce41b9791cb5bc55aedd071240252c4
|
refs/heads/master
| 2023-01-21T21:17:59.439654
| 2023-01-13T01:52:15
| 2023-01-13T01:52:15
| 91,620,472
| 1,171
| 288
|
Apache-2.0
| 2023-01-13T01:52:16
| 2017-05-17T21:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,556
|
py
|
BackupRead_BackupWrite.py
|
## demonstrates using BackupRead and BackupWrite to copy all of a file's data streams
import win32file, win32api, win32con, win32security, ntsecuritycon
from win32com import storagecon
import pythoncom, pywintypes
import struct, traceback
from pywin32_testutil import str2bytes, ob2memory
all_sd_info=win32security.DACL_SECURITY_INFORMATION|win32security.DACL_SECURITY_INFORMATION| \
win32security.OWNER_SECURITY_INFORMATION|win32security.GROUP_SECURITY_INFORMATION
tempdir=win32api.GetTempPath()
tempfile=win32api.GetTempFileName(tempdir,'bkr')[0]
outfile=win32api.GetTempFileName(tempdir,'out')[0]
print 'Filename:',tempfile,'Output file:',outfile
f=open(tempfile,'w')
f.write('some random junk'+'x'*100)
f.close()
## add a couple of alternate data streams
f=open(tempfile+':streamdata','w')
f.write('data written to alternate stream'+'y'*100)
f.close()
f=open(tempfile+':anotherstream','w')
f.write('z'*100)
f.close()
## add Summary Information, which is stored as a separate stream
m=storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE |storagecon.STGM_DIRECT
pss=pythoncom.StgOpenStorageEx(tempfile, m, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage,None)
ps=pss.Create(pythoncom.FMTID_SummaryInformation,pythoncom.IID_IPropertyStorage,0,storagecon.STGM_READWRITE|storagecon.STGM_SHARE_EXCLUSIVE)
ps.WriteMultiple((storagecon.PIDSI_KEYWORDS,storagecon.PIDSI_COMMENTS),('keywords','comments'))
ps=None
pss=None
## add a custom security descriptor to make sure we don't
## get a default that would always be the same for both files in temp dir
new_sd=pywintypes.SECURITY_DESCRIPTOR()
sid=win32security.LookupAccountName('','EveryOne')[0]
acl=pywintypes.ACL()
acl.AddAccessAllowedAce(1, win32con.GENERIC_READ, sid)
acl.AddAccessAllowedAce(1, ntsecuritycon.FILE_APPEND_DATA, sid)
acl.AddAccessAllowedAce(1, win32con.GENERIC_WRITE, sid)
acl.AddAccessAllowedAce(1, ntsecuritycon.FILE_ALL_ACCESS, sid)
new_sd.SetSecurityDescriptorDacl(True, acl, False)
win32security.SetFileSecurity(tempfile,win32security.DACL_SECURITY_INFORMATION,new_sd)
sa=pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle=True
h=win32file.CreateFile(tempfile, win32con.GENERIC_ALL ,win32con.FILE_SHARE_READ,
sa, win32con.OPEN_EXISTING, win32file.FILE_FLAG_BACKUP_SEMANTICS , None)
outh=win32file.CreateFile(outfile, win32con.GENERIC_ALL ,win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
sa, win32con.OPEN_EXISTING, win32file.FILE_FLAG_BACKUP_SEMANTICS , None)
ctxt=0
outctxt=0
buf=None
readsize=100
while 1:
bytes_read, buf, ctxt=win32file.BackupRead(h, readsize, buf, False, True, ctxt)
if bytes_read==0:
break
bytes_written, outctxt=win32file.BackupWrite(outh, bytes_read, buf, False, True, outctxt)
print 'Written:',bytes_written,'Context:',outctxt
win32file.BackupRead(h, 0, buf, True, True, ctxt)
win32file.BackupWrite(outh, 0, str2bytes(''), True, True, outctxt)
win32file.CloseHandle(h)
win32file.CloseHandle(outh)
assert open(tempfile).read()==open(outfile).read(),"File contents differ !"
assert open(tempfile+':streamdata').read()==open(outfile+':streamdata').read(),"streamdata contents differ !"
assert open(tempfile+':anotherstream').read()==open(outfile+':anotherstream').read(),"anotherstream contents differ !"
assert ob2memory(win32security.GetFileSecurity(tempfile,all_sd_info))[:]== \
ob2memory(win32security.GetFileSecurity(outfile, all_sd_info))[:], "Security descriptors are different !"
## also should check Summary Info programmatically
|
6c0c7cb6f4db5c2dd808f7ac31fbca61c16ad6cb
|
88dd4380e0d33d4a118ca4e69e4ca9b1c8f45e1f
|
/pyspedas/rbsp/load.py
|
8137ec4a8ee666ddfa9ed7dc7e02c5b47dfdd220
|
[
"MIT"
] |
permissive
|
spedas/pyspedas
|
16d34015961e3a4d3eaf8637d3cb6abca95df1b1
|
1d07b148753afa96e148c5835ed9545c507577da
|
refs/heads/master
| 2023-09-01T16:07:47.131334
| 2023-08-25T17:15:35
| 2023-08-25T17:15:35
| 167,614,292
| 125
| 61
|
MIT
| 2023-09-08T18:41:27
| 2019-01-25T21:11:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,244
|
py
|
load.py
|
from pyspedas.utilities.dailynames import dailynames
from pyspedas.utilities.download import download
from pytplot import time_clip as tclip
from pytplot import cdf_to_tplot
from .config import CONFIG
def load(trange=['2018-11-5', '2018-11-6'],
probe='a',
instrument='emfisis',
level='l3',
datatype='magnetometer',
suffix='',
cadence='4sec', # for EMFISIS mag data
coord='sm', # for EMFISIS mag data
wavetype='waveform', # for EMFISIS waveform data
rel='rel04', # for ECT data
get_support_data=False,
varformat=None,
varnames=[],
downloadonly=False,
notplot=False,
no_update=False,
time_clip=False):
"""
This function loads Van Allen Probes (RBSP) data; this function is not meant
to be called directly; instead, see the wrappers:
pyspedas.rbsp.emfisis
pyspedas.rbsp.rbspice
pyspedas.rbsp.efw
pyspedas.rbsp.mageis
pyspedas.rbsp.hope
pyspedas.rbsp.rept
pyspedas.rbsp.rps
"""
if not isinstance(probe, list):
probe = [probe]
datatype_in = datatype
datatype = datatype.lower()
prefix = ''
out_files = []
if notplot:
tvars = {}
else:
tvars = []
for prb in probe:
if instrument == 'emfisis':
if datatype == 'density' or datatype == 'housekeeping' or datatype == 'wna-survey':
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/%Y/rbsp-'+prb+'_'+datatype+'_'+instrument+'-'+level+'_%Y%m%d_v*.cdf'
elif datatype == 'wfr' or datatype == 'hfr':
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/'+wavetype+'/%Y/rbsp-'+prb+'_'+datatype+'-'+wavetype+'_'+instrument+'-'+level+'_%Y%m%d*_v*.cdf'
else:
if level == 'l2' and datatype == 'magnetometer':
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/uvw/%Y/rbsp-'+prb+'_'+datatype+'_uvw_'+instrument+'-'+level+'_%Y%m%d*_v*.cdf'
else:
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/'+cadence+'/'+coord+'/%Y/rbsp-'+prb+'_'+datatype+'_'+cadence+'-'+coord+'_'+instrument+'-'+level+'_%Y%m%d_v*.cdf'
elif instrument == 'rbspice':
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/%Y/rbsp-'+prb+'-'+instrument+'_lev-'+str(level[-1])+'?'+datatype+'_%Y%m%d_v*.cdf'
prefix = 'rbsp'+prb+'_rbspice_'+level+'_'+datatype_in+'_'
elif instrument == 'efw':
if level == 'l3':
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/%Y/rbsp'+prb+'_'+instrument+'-'+level+'_%Y%m%d_v??.cdf'
else:
pathformat = 'rbsp'+prb+'/'+level+'/'+instrument+'/'+datatype+'/%Y/rbsp'+prb+'_'+instrument+'-'+level+'_'+datatype+'_%Y%m%d_v??.cdf'
elif instrument == 'mageis':
pathformat = 'rbsp'+prb+'/'+level+'/ect/'+instrument+'/sectors/'+rel+'/%Y/rbsp'+prb+'_'+rel+'_ect-mageis-'+level+'_%Y%m%d_v*.cdf'
elif instrument == 'hope':
if datatype == 'moments':
pathformat = 'rbsp'+prb+'/'+level+'/ect/'+instrument+'/'+datatype+'/'+rel+'/%Y/rbsp'+prb+'_'+rel+'_ect-hope-mom-'+level+'_%Y%m%d_v*.cdf'
elif datatype == 'pitchangle':
pathformat = 'rbsp'+prb+'/'+level+'/ect/'+instrument+'/'+datatype+'/'+rel+'/%Y/rbsp'+prb+'_'+rel+'_ect-hope-pa-'+level+'_%Y%m%d_v*.cdf'
elif datatype == 'spinaverage':
pathformat = 'rbsp'+prb+'/'+level+'/ect/'+instrument+'/'+datatype+'/'+rel+'/%Y/rbsp'+prb+'_'+rel+'_ect-hope-sci-'+level+'sa_%Y%m%d_v*.cdf'
elif instrument == 'rept':
pathformat = 'rbsp'+prb+'/'+level+'/ect/'+instrument+'/sectors/'+rel+'/%Y/rbsp'+prb+'_'+rel+'_ect-rept-sci-'+level+'_%Y%m%d_v*.cdf'
elif instrument == 'rps':
if datatype == 'rps-1min':
pathformat = 'rbsp'+prb+'/'+level+'/rps/psbr-rps-1min/%Y/rbsp'+prb+'_'+level+'-1min_psbr-rps_%Y%m%d_v*.cdf'
elif datatype == 'rps':
pathformat = 'rbsp'+prb+'/'+level+'/rps/psbr-rps/%Y/rbsp'+prb+'_'+level+'_psbr-rps_%Y%m%d_v*.cdf'
# find the full remote path names using the trange
remote_names = dailynames(file_format=pathformat, trange=trange)
files = download(remote_file=remote_names, remote_path=CONFIG['remote_data_dir'], local_path=CONFIG['local_data_dir'], no_download=no_update)
if files is not None:
for file in files:
out_files.append(file)
if not downloadonly:
tvars_o = cdf_to_tplot(sorted(out_files), prefix=prefix, suffix=suffix, get_support_data=get_support_data,
varformat=varformat, varnames=varnames, notplot=notplot)
if notplot:
tvars = dict(tvars, **tvars_o)
else:
tvars.extend(tvars_o)
if downloadonly:
return sorted(out_files)
if notplot:
return tvars
if time_clip:
for new_var in tvars:
tclip(new_var, trange[0], trange[1], suffix='')
return tvars
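# Illustrative call (sketch; normally reached through the pyspedas.rbsp wrappers rather
# than invoked directly, and the trange below is just the default example interval):
#   tvars = load(trange=['2018-11-5', '2018-11-6'], probe='a',
#                instrument='emfisis', level='l3', datatype='magnetometer')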
|
581ffe6e886b4ec12e251a0c397d0d579224d6a5
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/cpyext/pystrtod.py
|
775db1194701dc532f0c9c1f1c9a9a7e5afedb42
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,084
|
py
|
pystrtod.py
|
import errno
from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import cpython_api, CONST_STRING
from pypy.module.cpyext.pyobject import PyObject
from rpython.rlib import rdtoa
from rpython.rlib import rfloat
from rpython.rlib import rposix, jit
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem import rffi
# PyOS_double_to_string's "type", if non-NULL, will be set to one of:
Py_DTST_FINITE = 0
Py_DTST_INFINITE = 1
Py_DTST_NAN = 2
# Match the "type" back to values in CPython
DOUBLE_TO_STRING_TYPES_MAP = {
rfloat.DIST_FINITE: Py_DTST_FINITE,
rfloat.DIST_INFINITY: Py_DTST_INFINITE,
rfloat.DIST_NAN: Py_DTST_NAN
}
@cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0)
@jit.dont_look_inside # direct use of _get_errno()
def PyOS_string_to_double(space, s, endptr, w_overflow_exception):
"""Convert a string s to a double, raising a Python
exception on failure. The set of accepted strings corresponds to
the set of strings accepted by Python's float() constructor,
except that s must not have leading or trailing whitespace.
The conversion is independent of the current locale.
If endptr is NULL, convert the whole string. Raise
ValueError and return -1.0 if the string is not a valid
representation of a floating-point number.
If endptr is not NULL, convert as much of the string as
possible and set *endptr to point to the first unconverted
character. If no initial segment of the string is the valid
representation of a floating-point number, set *endptr to point
to the beginning of the string, raise ValueError, and return
-1.0.
If s represents a value that is too large to store in a float
(for example, "1e500" is such a string on many platforms) then
if overflow_exception is NULL return Py_HUGE_VAL (with
an appropriate sign) and don't set any exception. Otherwise,
overflow_exception must point to a Python exception object;
raise that exception and return -1.0. In both cases, set
*endptr to point to the first character after the converted value.
If any other error occurs during the conversion (for example an
out-of-memory error), set the appropriate Python exception and
return -1.0.
"""
user_endptr = True
try:
if not endptr:
endptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
user_endptr = False
result = rdtoa.dg_strtod(s, endptr)
endpos = (rffi.cast(rffi.LONG, endptr[0]) -
rffi.cast(rffi.LONG, s))
if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
raise oefmt(space.w_ValueError,
"invalid input at position %d", endpos)
err = rffi.cast(lltype.Signed, rposix._get_errno())
if err == errno.ERANGE:
rposix._set_errno(rffi.cast(rffi.INT, 0))
if w_overflow_exception is None:
if result > 0:
return rfloat.INFINITY
else:
return -rfloat.INFINITY
else:
raise oefmt(w_overflow_exception, "value too large")
return result
finally:
if not user_endptr:
lltype.free(endptr, flavor='raw')
@cpython_api([rffi.DOUBLE, lltype.Char, rffi.INT_real, rffi.INT_real, rffi.INTP], rffi.CCHARP)
def PyOS_double_to_string(space, val, format_code, precision, flags, ptype):
"""Convert a double val to a string using supplied
format_code, precision, and flags.
format_code must be one of 'e', 'E', 'f', 'F',
'g', 'G' or 'r'. For 'r', the supplied precision
must be 0 and is ignored. The 'r' format code specifies the
standard repr() format.
flags can be zero or more of the values Py_DTSF_SIGN,
Py_DTSF_ADD_DOT_0, or Py_DTSF_ALT, or-ed together:
Py_DTSF_SIGN means to always precede the returned string with a sign
character, even if val is non-negative.
Py_DTSF_ADD_DOT_0 means to ensure that the returned string will not look
like an integer.
Py_DTSF_ALT means to apply "alternate" formatting rules. See the
documentation for the PyOS_snprintf() '#' specifier for
details.
If ptype is non-NULL, then the value it points to will be set to one of
Py_DTST_FINITE, Py_DTST_INFINITE, or Py_DTST_NAN, signifying that
val is a finite number, an infinite number, or not a number, respectively.
The return value is a pointer to buffer with the converted string or
NULL if the conversion failed. The caller is responsible for freeing the
returned string by calling PyMem_Free().
"""
buffer, rtype = rfloat.double_to_string(val, format_code,
intmask(precision),
intmask(flags))
if ptype != lltype.nullptr(rffi.INTP.TO):
ptype[0] = rffi.cast(rffi.INT, DOUBLE_TO_STRING_TYPES_MAP[rtype])
bufp = rffi.str2charp(buffer)
return bufp
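# Illustrative behaviour (sketch, values for orientation only): with format_code 'r' and
# precision 0, 0.1 converts to the shortest repr "0.1"; with format_code 'f' and
# precision 2 the same value gives "0.10", and *ptype is set to Py_DTST_FINITE.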
|
de3db362623c536ac281e2ff9b38567030b75e77
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/procedures/glibc/__ctype_toupper_loc.py
|
521a433d1aae7f4cd31da2f6980d9a23013abbe3
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 892
|
py
|
__ctype_toupper_loc.py
|
import angr
######################################
# __ctype_toupper_loc
######################################
class __ctype_toupper_loc(angr.SimProcedure):
"""
Following is the description from linuxfoundation.org:
The __ctype_toupper_loc() function shall return a pointer into an array
of characters in the current locale that contains upper case equivalents
for each character in the current character set. The array shall contain
a total of 384 characters, and can be indexed with any signed or unsigned
char (i.e. with an index value between -128 and 255). If the application
is multithreaded, the array shall be local to the current thread.
This interface is not in the source standard; it is only in the binary
standard.
"""
def run(self):
table_ptr = self.state.libc.ctype_toupper_loc_table_ptr
return table_ptr
|
1c82f77c8f09515f78c389bb4ae2c9b41ae2b097
|
e51ec12f619d0adf0bad7cc4a8851415ed89ddab
|
/torch_struct/cky_crf.py
|
f9e16a129c70a8d49956ffeda40b4d18365d8fbd
|
[
"MIT"
] |
permissive
|
harvardnlp/pytorch-struct
|
9cc0e3167b4fe00e025216176ca054bb537f600e
|
7146de5659ff17ad7be53023c025ffd099866412
|
refs/heads/master
| 2023-07-10T10:23:57.946098
| 2022-01-30T19:49:08
| 2022-01-30T19:49:08
| 204,547,575
| 1,139
| 91
|
MIT
| 2022-01-30T19:49:10
| 2019-08-26T19:34:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
cky_crf.py
|
import torch
from .helpers import _Struct, Chart
A, B = 0, 1
class CKY_CRF(_Struct):
def _check_potentials(self, edge, lengths=None):
batch, N, _, NT = self._get_dimension(edge)
edge = self.semiring.convert(edge)
if lengths is None:
lengths = torch.LongTensor([N] * batch).to(edge.device)
return edge, batch, N, NT, lengths
def logpartition(self, scores, lengths=None, force_grad=False):
semiring = self.semiring
scores, batch, N, NT, lengths = self._check_potentials(scores, lengths)
beta = [Chart((batch, N, N), scores, semiring) for _ in range(2)]
L_DIM, R_DIM = 2, 3
# Initialize
reduced_scores = semiring.sum(scores)
term = reduced_scores.diagonal(0, L_DIM, R_DIM)
ns = torch.arange(N)
beta[A][ns, 0] = term
beta[B][ns, N - 1] = term
# Run
for w in range(1, N):
left = slice(None, N - w)
right = slice(w, None)
Y = beta[A][left, :w]
Z = beta[B][right, N - w :]
score = reduced_scores.diagonal(w, L_DIM, R_DIM)
new = semiring.times(semiring.dot(Y, Z), score)
beta[A][left, w] = new
beta[B][right, N - w - 1] = new
final = beta[A][0, :]
log_Z = final[:, torch.arange(batch), lengths - 1]
return log_Z, [scores]
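# Sketch of the recursion implemented above (the inside algorithm over spans, written
# for the ordinary sum/product semiring):
#   beta[i, j] = score[i, j] * sum_{k=i..j-1} beta[i, k] * beta[k+1, j]
# beta[A] anchors spans at their left endpoint and beta[B] at their right endpoint, so
# the dot over split points can be batched for every span width w at once.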
|
830ddf2b1dd51ef6e79f197160e4418d83059d73
|
73305ddcc6dc9775b1e9a71506e2f3c74f678edc
|
/starthinker_ui/website/management/commands/announce.py
|
869e63a5fe5dc0cb14f1bdb9d207b24f909eeb40
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
google/starthinker
|
ef359557da4140275a8524d0d813eecf022ece9e
|
b596df09c52511e2e0c0987f6245aa4607190dd0
|
refs/heads/master
| 2023-08-25T21:16:45.578012
| 2023-07-17T22:19:18
| 2023-07-17T22:20:10
| 123,017,995
| 167
| 64
|
Apache-2.0
| 2023-08-02T01:24:51
| 2018-02-26T19:15:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,195
|
py
|
announce.py
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from datetime import date, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from starthinker.util.email import send_email
from starthinker.util.email.template import EmailTemplate
from starthinker_ui.recipe.scripts import Script
class Command(BaseCommand):
help = 'Generate Newsletter Of New Solutions'
def add_arguments(self, parser):
parser.add_argument(
'--days',
action='store',
dest='days',
default=90,
type=int,
help='Number of days to go back for recipes.',
)
def handle(self, *args, **kwargs):
day = date.today() - timedelta(days=kwargs['days'])
email = {
'subject':
'Announcing Six New Open Source Modules For Ecosystems',
'style': {
'background': '#f2f2f2',
'foreground': '#ffffff',
'text': '#414347',
'link': '#4285f4',
'font': 'Roboto, Helvetica, Arial sans-serif;',
'align': 'left'
},
'logo':
'https://google.github.io/starthinker/static/gTech_StarThinker.png',
'body': {
'sections': [{
'header':
'Six New Solutions For Partners To Build New Services',
'paragraph':
'In Q1, StarThinker released 6 new building blocks '
'available as Python, Airflow, Colab, and no-coding UI. '
                'These building blocks are now open source and available '
'for deployment by Partners. Below is a description of '
'each solution and possible service or efficiency gain by '
'partners.',
'grid': []
}]
},
'footer': [{
'text': 'Internal UI',
'link': 'http://go/starthinker'
}, {
'text': 'GitHub Solution Gallery',
'link': 'https://google.github.io/starthinker/'
}, {
'text': 'Google3 Repository',
'link': 'http://go/starthinker-google3'
}, {
'text': 'GOB Repository ( Official )',
'link': 'http://go/starthinker-code'
}, {
'text': 'GitHub Repository',
'link': 'https://github.com/google/starthinker'
}],
'copyright':
'Copyright 2020 Google LLC'
}
odd = True
for s in Script.get_scripts():
if s.get_released() < day:
continue
print('SCRIPT: ', s.get_tag())
if not s.get_image():
continue
row = [{
'image': {
'src': s.get_image(),
'link': s.get_link_client()
}
}, {
'header': '[%s](%s)' % (s.get_name(), s.get_link_client()),
'paragraph': s.get_description()
}]
email['body']['sections'][0]['grid'].append(row)
if odd:
row.reverse()
odd = not odd
email = EmailTemplate(email)
# send or print
#if project.args.email_to and project.args.email_from:
# print('EMAILING: ', project.args.email_to)
# send_email('user', project.args.email_to, project.args.email_from, None, email.get_subject(), email.get_text(), email.get_html())
#else:
if 1:
# write to STDOUT
print(email.get_html())
print('<pre style="width:600px;margin:0px auto;">%s</pre>' %
email.get_text())
|
b8b284273226d8710523588c550dbe0a2a881493
|
dcee09bdd347bbb0e46b6133a7e348f0be90d9f3
|
/tests/test_RunJob.py
|
4c871b564343785696f8bb117ed22a85374db19a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Accenture/jenkins-attack-framework
|
01a7964fa6561d748bbfaa1a78a3f32dba2315f4
|
ce0de8e2c014ccc42ae0f159e40f36415a86e9e3
|
refs/heads/master
| 2023-04-18T14:15:32.116078
| 2021-05-11T13:47:56
| 2021-05-11T13:47:56
| 364,371,887
| 511
| 54
|
MIT
| 2021-05-11T13:47:57
| 2021-05-04T20:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 12,237
|
py
|
test_RunJob.py
|
import concurrent.futures
import os
import random
import string
import tempfile
import unittest
import warnings
from libs.JAF.BaseCommandLineParser import BaseCommandLineParser
from libs.JAF.plugin_RunJob import RunJob, RunJobParser
from .configuration import (
computer_linux,
computer_windows_admin,
computer_windows_normal,
server,
user_admin,
user_bad,
user_noaccess,
user_normal,
user_read_job_access,
user_read_no_job_access,
)
from .helpers import DummyWebServer, RemoteFeedbackTester, TestFramework
class DumpCredsViaJobTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.credential_test_job1 = "testRunJob1" + "".join(
random.choices(string.ascii_letters + string.digits, k=20)
)
cls.credential_test_job2 = "testRunJob2" + "".join(
random.choices(string.ascii_letters + string.digits, k=20)
)
cls.remote_feedback = RemoteFeedbackTester(12345, 50)
f, cls.ping_script_windows = tempfile.mkstemp(text=True, suffix=".bat")
os.write(f, cls.remote_feedback.get_script("python").encode("utf8"))
os.close(f)
f, cls.ping_script_linux = tempfile.mkstemp(text=True, suffix=".sh")
os.write(f, cls.remote_feedback.get_script("python").encode("utf8"))
os.close(f)
@classmethod
    def tearDownClass(cls):
os.remove(cls.ping_script_windows)
os.remove(cls.ping_script_linux)
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "RunJob"
self.TestParserClass = RunJobParser
self.TestClass = RunJob
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
"https://127.0.0.1:59321/",
"-a",
user_bad,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
"https://127.0.0.1:59322/",
"-a",
user_bad,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
"http://127.0.0.1:59322/",
"-a",
user_bad,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_bad,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_noaccess,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_read_no_job_creds(self):
"""Make sure that calling with valid jenkins (read only [no job access] creds) returns expected results"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_read_job_creds(self):
"""Make sure that calling with valid jenkins (read only [job access] creds) returns expected results"""
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_job_access,
self.credential_test_job1,
self.ping_script_linux,
],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
# Swapping order because last test doesn't clean up completely.
def test_1_valid_jenkins_valid_admin_creds_posix(self):
"""Make sure that calling with valid jenkins (admin creds, POSIX) returns expected results"""
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-N",
computer_linux,
"-T",
"posix",
self.credential_test_job1,
self.ping_script_linux,
]
)
self.assertTrue(future.result())
def test_1_valid_jenkins_valid_admin_creds_windows(self):
"""Make sure that calling with valid jenkins (admin creds, Windows) returns expected results"""
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-N",
computer_windows_admin,
"-T",
"windows",
self.credential_test_job1,
self.ping_script_windows,
]
)
self.assertTrue(future.result())
def test_2_valid_jenkins_valid_admin_creds_ghost_job_windows_unprivileged(self):
"""Make sure that calling with valid jenkins (admin creds, Windows, unprivileged ghost job) returns expected results"""
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-g",
"-N",
computer_windows_normal,
"-T",
"windows",
self.credential_test_job1,
self.ping_script_windows,
]
)
self.assertTrue(future.result())
def test_2_valid_jenkins_valid_admin_creds_ghost_job_windows_elevated(self):
"""Make sure that calling with valid jenkins (admin creds, Windows, elevated ghost job) returns expected results"""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-g",
"-N",
computer_windows_admin,
"-T",
"windows",
self.credential_test_job1,
self.ping_script_windows,
]
)
self.assertTrue(future.result())
def test_3_valid_jenkins_valid_normal_creds_linux(self):
"""Make sure that calling with valid jenkins (normal creds, POSIX) returns expected results"""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_normal,
"-N",
computer_linux,
"-T",
"posix",
self.credential_test_job1,
self.ping_script_linux,
]
)
self.assertTrue(future.result())
def test_3_valid_jenkins_valid_normal_creds_windows(self):
"""Make sure that calling with valid jenkins (normal creds, Windows) returns expected results"""
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(self.remote_feedback.got_connect_back)
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_normal,
"-N",
computer_windows_normal,
"-T",
"windows",
self.credential_test_job2,
self.ping_script_windows,
]
)
self.assertTrue(future.result())
class DumpCredsViaJobParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "RunJob"
self.TestClass = RunJob
self.TestParserClass = RunJobParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
if __name__ == "__main__":
unittest.main()
|
8f62ceb617f30072de639bfd1366a4423ea8aec9
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/appservice/hybrid_connection.py
|
0f3fd4eb067fc522d7cb454e86f59556190151f8
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 27,790
|
py
|
hybrid_connection.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HybridConnectionArgs', 'HybridConnection']
@pulumi.input_type
class HybridConnectionArgs:
def __init__(__self__, *,
app_service_name: pulumi.Input[str],
hostname: pulumi.Input[str],
port: pulumi.Input[int],
relay_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
send_key_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a HybridConnection resource.
:param pulumi.Input[str] app_service_name: Specifies the name of the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] hostname: The hostname of the endpoint.
:param pulumi.Input[int] port: The port of the endpoint.
:param pulumi.Input[str] relay_id: The ID of the Service Bus Relay. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] send_key_name: The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
"""
pulumi.set(__self__, "app_service_name", app_service_name)
pulumi.set(__self__, "hostname", hostname)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "relay_id", relay_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if send_key_name is not None:
pulumi.set(__self__, "send_key_name", send_key_name)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def hostname(self) -> pulumi.Input[str]:
"""
The hostname of the endpoint.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: pulumi.Input[str]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
The port of the endpoint.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="relayId")
def relay_id(self) -> pulumi.Input[str]:
"""
The ID of the Service Bus Relay. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "relay_id")
@relay_id.setter
def relay_id(self, value: pulumi.Input[str]):
pulumi.set(self, "relay_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sendKeyName")
def send_key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
"""
return pulumi.get(self, "send_key_name")
@send_key_name.setter
def send_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "send_key_name", value)
@pulumi.input_type
class _HybridConnectionState:
def __init__(__self__, *,
app_service_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
relay_id: Optional[pulumi.Input[str]] = None,
relay_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
send_key_name: Optional[pulumi.Input[str]] = None,
send_key_value: Optional[pulumi.Input[str]] = None,
service_bus_namespace: Optional[pulumi.Input[str]] = None,
service_bus_suffix: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering HybridConnection resources.
:param pulumi.Input[str] app_service_name: Specifies the name of the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] hostname: The hostname of the endpoint.
:param pulumi.Input[str] namespace_name: The name of the Relay Namespace.
:param pulumi.Input[int] port: The port of the endpoint.
:param pulumi.Input[str] relay_id: The ID of the Service Bus Relay. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] send_key_name: The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
:param pulumi.Input[str] send_key_value: The value of the Service Bus Primary Access key.
:param pulumi.Input[str] service_bus_namespace: The name of the Service Bus namespace.
:param pulumi.Input[str] service_bus_suffix: The suffix for the service bus endpoint.
"""
if app_service_name is not None:
pulumi.set(__self__, "app_service_name", app_service_name)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if namespace_name is not None:
pulumi.set(__self__, "namespace_name", namespace_name)
if port is not None:
pulumi.set(__self__, "port", port)
if relay_id is not None:
pulumi.set(__self__, "relay_id", relay_id)
if relay_name is not None:
pulumi.set(__self__, "relay_name", relay_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if send_key_name is not None:
pulumi.set(__self__, "send_key_name", send_key_name)
if send_key_value is not None:
pulumi.set(__self__, "send_key_value", send_key_value)
if service_bus_namespace is not None:
pulumi.set(__self__, "service_bus_namespace", service_bus_namespace)
if service_bus_suffix is not None:
pulumi.set(__self__, "service_bus_suffix", service_bus_suffix)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
The hostname of the endpoint.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Relay Namespace.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The port of the endpoint.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="relayId")
def relay_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Service Bus Relay. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "relay_id")
@relay_id.setter
def relay_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "relay_id", value)
@property
@pulumi.getter(name="relayName")
def relay_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "relay_name")
@relay_name.setter
def relay_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "relay_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sendKeyName")
def send_key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
"""
return pulumi.get(self, "send_key_name")
@send_key_name.setter
def send_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "send_key_name", value)
@property
@pulumi.getter(name="sendKeyValue")
def send_key_value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the Service Bus Primary Access key.
"""
return pulumi.get(self, "send_key_value")
@send_key_value.setter
def send_key_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "send_key_value", value)
@property
@pulumi.getter(name="serviceBusNamespace")
def service_bus_namespace(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Service Bus namespace.
"""
return pulumi.get(self, "service_bus_namespace")
@service_bus_namespace.setter
def service_bus_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_bus_namespace", value)
@property
@pulumi.getter(name="serviceBusSuffix")
def service_bus_suffix(self) -> Optional[pulumi.Input[str]]:
"""
The suffix for the service bus endpoint.
"""
return pulumi.get(self, "service_bus_suffix")
@service_bus_suffix.setter
def service_bus_suffix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_bus_suffix", value)
class HybridConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
relay_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
send_key_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an App Service Hybrid Connection for an existing App Service, Relay and Service Bus.
!> **NOTE:** This resource has been deprecated in version 3.0 of the AzureRM provider and will be removed in version 4.0. Please use `appservice.FunctionAppHybridConnection` resources instead.
## Example Usage
This example provisions an App Service, a Relay Hybrid Connection, and a Service Bus using their outputs to create the App Service Hybrid Connection.
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_namespace = azure.relay.Namespace("exampleNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Standard")
example_hybrid_connection = azure.relay.HybridConnection("exampleHybridConnection",
resource_group_name=example_resource_group.name,
relay_namespace_name=example_namespace.name,
user_metadata="examplemetadata")
example_appservice_hybrid_connection_hybrid_connection = azure.appservice.HybridConnection("exampleAppservice/hybridConnectionHybridConnection",
app_service_name=example_app_service.name,
resource_group_name=example_resource_group.name,
relay_id=example_hybrid_connection.id,
hostname="testhostname.example",
port=8080,
send_key_name="exampleSharedAccessKey")
```
## Import
App Service Hybrid Connections can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/hybridConnection:HybridConnection example /subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/exampleResourceGroup1/providers/Microsoft.Web/sites/exampleAppService1/hybridConnectionNamespaces/exampleRN1/relays/exampleRHC1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: Specifies the name of the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] hostname: The hostname of the endpoint.
:param pulumi.Input[int] port: The port of the endpoint.
:param pulumi.Input[str] relay_id: The ID of the Service Bus Relay. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] send_key_name: The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HybridConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an App Service Hybrid Connection for an existing App Service, Relay and Service Bus.
!> **NOTE:** This resource has been deprecated in version 3.0 of the AzureRM provider and will be removed in version 4.0. Please use `appservice.FunctionAppHybridConnection` resources instead.
## Example Usage
This example provisions an App Service, a Relay Hybrid Connection, and a Service Bus using their outputs to create the App Service Hybrid Connection.
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_namespace = azure.relay.Namespace("exampleNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Standard")
example_hybrid_connection = azure.relay.HybridConnection("exampleHybridConnection",
resource_group_name=example_resource_group.name,
relay_namespace_name=example_namespace.name,
user_metadata="examplemetadata")
example_appservice_hybrid_connection_hybrid_connection = azure.appservice.HybridConnection("exampleAppservice/hybridConnectionHybridConnection",
app_service_name=example_app_service.name,
resource_group_name=example_resource_group.name,
relay_id=example_hybrid_connection.id,
hostname="testhostname.example",
port=8080,
send_key_name="exampleSharedAccessKey")
```
## Import
App Service Hybrid Connections can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/hybridConnection:HybridConnection example /subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/exampleResourceGroup1/providers/Microsoft.Web/sites/exampleAppService1/hybridConnectionNamespaces/exampleRN1/relays/exampleRHC1
```
:param str resource_name: The name of the resource.
:param HybridConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HybridConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
relay_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
send_key_name: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HybridConnectionArgs.__new__(HybridConnectionArgs)
if app_service_name is None and not opts.urn:
raise TypeError("Missing required property 'app_service_name'")
__props__.__dict__["app_service_name"] = app_service_name
if hostname is None and not opts.urn:
raise TypeError("Missing required property 'hostname'")
__props__.__dict__["hostname"] = hostname
if port is None and not opts.urn:
raise TypeError("Missing required property 'port'")
__props__.__dict__["port"] = port
if relay_id is None and not opts.urn:
raise TypeError("Missing required property 'relay_id'")
__props__.__dict__["relay_id"] = relay_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["send_key_name"] = send_key_name
__props__.__dict__["namespace_name"] = None
__props__.__dict__["relay_name"] = None
__props__.__dict__["send_key_value"] = None
__props__.__dict__["service_bus_namespace"] = None
__props__.__dict__["service_bus_suffix"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["sendKeyValue"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(HybridConnection, __self__).__init__(
'azure:appservice/hybridConnection:HybridConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
relay_id: Optional[pulumi.Input[str]] = None,
relay_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
send_key_name: Optional[pulumi.Input[str]] = None,
send_key_value: Optional[pulumi.Input[str]] = None,
service_bus_namespace: Optional[pulumi.Input[str]] = None,
service_bus_suffix: Optional[pulumi.Input[str]] = None) -> 'HybridConnection':
"""
Get an existing HybridConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: Specifies the name of the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] hostname: The hostname of the endpoint.
:param pulumi.Input[str] namespace_name: The name of the Relay Namespace.
:param pulumi.Input[int] port: The port of the endpoint.
:param pulumi.Input[str] relay_id: The ID of the Service Bus Relay. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] send_key_name: The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
:param pulumi.Input[str] send_key_value: The value of the Service Bus Primary Access key.
:param pulumi.Input[str] service_bus_namespace: The name of the Service Bus namespace.
:param pulumi.Input[str] service_bus_suffix: The suffix for the service bus endpoint.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HybridConnectionState.__new__(_HybridConnectionState)
__props__.__dict__["app_service_name"] = app_service_name
__props__.__dict__["hostname"] = hostname
__props__.__dict__["namespace_name"] = namespace_name
__props__.__dict__["port"] = port
__props__.__dict__["relay_id"] = relay_id
__props__.__dict__["relay_name"] = relay_name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["send_key_name"] = send_key_name
__props__.__dict__["send_key_value"] = send_key_value
__props__.__dict__["service_bus_namespace"] = service_bus_namespace
__props__.__dict__["service_bus_suffix"] = service_bus_suffix
return HybridConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "app_service_name")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[str]:
"""
The hostname of the endpoint.
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Output[str]:
"""
The name of the Relay Namespace.
"""
return pulumi.get(self, "namespace_name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
The port of the endpoint.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="relayId")
def relay_id(self) -> pulumi.Output[str]:
"""
The ID of the Service Bus Relay. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "relay_id")
@property
@pulumi.getter(name="relayName")
def relay_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "relay_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the App Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="sendKeyName")
def send_key_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the Service Bus key which has Send permissions. Defaults to `RootManageSharedAccessKey`.
"""
return pulumi.get(self, "send_key_name")
@property
@pulumi.getter(name="sendKeyValue")
def send_key_value(self) -> pulumi.Output[str]:
"""
The value of the Service Bus Primary Access key.
"""
return pulumi.get(self, "send_key_value")
@property
@pulumi.getter(name="serviceBusNamespace")
def service_bus_namespace(self) -> pulumi.Output[str]:
"""
The name of the Service Bus namespace.
"""
return pulumi.get(self, "service_bus_namespace")
@property
@pulumi.getter(name="serviceBusSuffix")
def service_bus_suffix(self) -> pulumi.Output[str]:
"""
The suffix for the service bus endpoint.
"""
return pulumi.get(self, "service_bus_suffix")
|
d6a62ce5a6b3a9353f03bc40b1a85ccf47bb0922
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/psets/lowPtQuadStepTrajectoryFilter_cfi.py
|
7fe52f635e599e365ea6e51c183e85425cacf826
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
lowPtQuadStepTrajectoryFilter_cfi.py
|
import FWCore.ParameterSet.Config as cms
lowPtQuadStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string('CompositeTrajectoryFilter'),
filters = cms.VPSet(
cms.PSet(
refToPSet_ = cms.string('lowPtQuadStepTrajectoryFilterBase')
),
cms.PSet(
refToPSet_ = cms.string('ClusterShapeTrajectoryFilter')
)
)
)
|
b3d334902e36cd7bac29c4637c69ca1602609b1f
|
1ccc111bcdc11bf10309d0ec6df731b6105b6d95
|
/tests/test_cu/models.py
|
e9c8202dd9d4b2513bd7b4284b6b97c296b863e5
|
[
"BSD-3-Clause"
] |
permissive
|
django/django-localflavor
|
1d7b2d84a85302bf2c0a04b30ece69705c56dea9
|
c539612758ded5fcb3fbb5cb5ff20f1b0de96fcc
|
refs/heads/master
| 2023-09-03T03:15:09.070463
| 2023-04-24T09:13:53
| 2023-04-24T09:13:53
| 10,264,130
| 703
| 274
|
BSD-3-Clause
| 2023-07-28T04:48:02
| 2013-05-24T10:20:36
|
Python
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
models.py
|
from django.db import models
from localflavor.cu.models import CUIdentityCardNumberField, CUPostalCodeField, CUProvinceField, CURegionField
class CUSomebody(models.Model):
province_1 = CUProvinceField()
province_2 = CUProvinceField(blank=True)
region_1 = CURegionField()
region_2 = CURegionField(blank=True)
postal_code = CUPostalCodeField()
id_number = CUIdentityCardNumberField()
|
1675a2d972d9f20d7998215a9ccd0772868a3b83
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/jobs/transforms/validation/question_validation_test.py
|
c2e238b17857b64318e476323e522369ff1ce73e
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 9,676
|
py
|
question_validation_test.py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.transforms.question_validation."""
from __future__ import annotations
from core.jobs import job_test_utils
from core.jobs.decorators import validation_decorators
from core.jobs.transforms.validation import question_validation
from core.jobs.types import base_validation_errors
from core.platform import models
from core.tests import test_utils
import apache_beam as beam
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import question_models
(base_models, question_models) = models.Registry.import_models(
[models.Names.BASE_MODEL, models.Names.QUESTION])
class ValidateQuestionSnapshotMetadataModelTests(
job_test_utils.PipelinedTestBase):
def test_validate_change_domain_implemented(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='delete',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [])
def test_change_dict_without_cmd(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='delete',
commit_cmds=[{'invalid': 'data'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'invalid': 'data'},
'Missing cmd key in change dict')
])
def test_change_dict_with_invalid_cmd(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='delete',
commit_cmds=[{'cmd': 'invalid'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'invalid'},
'Command invalid is not allowed')
])
def test_change_dict_with_missing_attributes_in_cmd(self) -> None:
commit_dict = {
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'old_value': 'old_value'
}
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='edit',
commit_cmds=[commit_dict])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'The following required attributes are missing: new_value')
])
def test_change_dict_with_extra_attributes_in_cmd(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='create',
commit_cmds=[{'cmd': 'create_new', 'invalid': 'invalid'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'create_new', 'invalid': 'invalid'},
'The following extra attributes are present: invalid')
])
def test_update_question_property_with_wrong_property_name(self) -> None:
commit_dict = {
'cmd': 'update_question_property',
'property_name': 'wrong',
'new_value': 'new_value',
'old_value': 'old_value'
}
invalid_commit_cmd_model = (
question_models.QuestionSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='commiter-id',
commit_type='edit',
commit_cmds=[commit_dict])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'Value for property_name in cmd update_question_property: '
'wrong is not allowed')
])
class RelationshipsOfTests(test_utils.TestBase):
def test_question_skill_link_model_relationships(self) -> None:
self.assertItemsEqual(
validation_decorators.RelationshipsOf.get_model_kind_references(
'QuestionSkillLinkModel', 'id'), ['QuestionModel'])
self.assertItemsEqual(
validation_decorators.RelationshipsOf.get_model_kind_references(
'QuestionSkillLinkModel', 'skill_id'), ['SkillModel'])
def test_question_commit_log_entry_model_relationships(self) -> None:
self.assertItemsEqual(
validation_decorators.RelationshipsOf.get_model_kind_references(
'QuestionCommitLogEntryModel', 'question_id'),
['QuestionModel'])
def test_question_summary_model_relationships(self) -> None:
self.assertItemsEqual(
validation_decorators.RelationshipsOf.get_model_kind_references(
'QuestionSummaryModel', 'id'), ['QuestionModel'])
class ValidateQuestionCommitLogEntryModelTests(
job_test_utils.PipelinedTestBase):
def test_validate_question_model(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionCommitLogEntryModel(
id='question_123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
question_id='123',
user_id='',
commit_type='delete',
post_commit_status='private',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionCommitLogEntryModel())
)
self.assert_pcoll_equal(output, [])
def test_raises_commit_cmd_none_error(self) -> None:
invalid_commit_cmd_model = (
question_models.QuestionCommitLogEntryModel(
id='model_123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
question_id='123',
user_id='',
commit_type='delete',
post_commit_status='private',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
question_validation.ValidateQuestionCommitLogEntryModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsNoneError(invalid_commit_cmd_model)
])
|
ea1c4d3312f95e1cc7e7a51201ba8d2daba60111
|
54ab1a59d528c4f6e15193daf129e6d58856ba8b
|
/guess.py
|
8fb3b11ef471b7cb354898fc2165f20e01a2ffa6
|
[] |
no_license
|
mjhea0/python-ruby
|
bfd52b3f790b33924aea9b52457685a534e18aaf
|
c79432e1fb64db865209911db1ae66c3b9b1f5a7
|
refs/heads/master
| 2023-06-30T20:26:18.940912
| 2021-12-04T15:48:32
| 2021-12-04T15:48:32
| 16,994,945
| 265
| 43
| null | 2021-12-04T15:48:33
| 2014-02-19T18:55:32
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
guess.py
|
#!/usr/bin/python3
import random
number = random.randint(1, 20)
guesses = 0
print('Hello! What is your name?')
name = input()
print(f"Hi, {name}. I'm thinking of a number from 1 and 20.")
while guesses < 6:
print(f'What is your guess? You have {6 - guesses} more guesses.')
guess = input()
guess = int(guess)
guesses = guesses + 1
if guess < number:
print('Too low.')
elif guess > number:
print('Too high.')
elif guess == number:
print(f'Good job, {name}! You guessed my number in {guesses} guesses!')
break
if guess != number:
print(f'Nope. The number I was thinking of was {number}.')
|
35cc2ad414d7872c87273bf8ae5922dce5f214f7
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel.py
|
97882dccbaf29eceff776d8970f2280fefd21f0c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,579
|
py
|
AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel(object):
def __init__(self):
self._account_id = None
self._add_employee_list = None
self._add_employee_open_id_list = None
self._agreement_no = None
self._project_id = None
self._remove_employee_list = None
self._remove_employee_open_id_list = None
@property
def account_id(self):
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def add_employee_list(self):
return self._add_employee_list
@add_employee_list.setter
def add_employee_list(self, value):
if isinstance(value, list):
self._add_employee_list = list()
for i in value:
self._add_employee_list.append(i)
@property
def add_employee_open_id_list(self):
return self._add_employee_open_id_list
@add_employee_open_id_list.setter
def add_employee_open_id_list(self, value):
if isinstance(value, list):
self._add_employee_open_id_list = list()
for i in value:
self._add_employee_open_id_list.append(i)
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, value):
self._project_id = value
@property
def remove_employee_list(self):
return self._remove_employee_list
@remove_employee_list.setter
def remove_employee_list(self, value):
if isinstance(value, list):
self._remove_employee_list = list()
for i in value:
self._remove_employee_list.append(i)
@property
def remove_employee_open_id_list(self):
return self._remove_employee_open_id_list
@remove_employee_open_id_list.setter
def remove_employee_open_id_list(self, value):
if isinstance(value, list):
self._remove_employee_open_id_list = list()
for i in value:
self._remove_employee_open_id_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.account_id:
if hasattr(self.account_id, 'to_alipay_dict'):
params['account_id'] = self.account_id.to_alipay_dict()
else:
params['account_id'] = self.account_id
if self.add_employee_list:
if isinstance(self.add_employee_list, list):
for i in range(0, len(self.add_employee_list)):
element = self.add_employee_list[i]
if hasattr(element, 'to_alipay_dict'):
self.add_employee_list[i] = element.to_alipay_dict()
if hasattr(self.add_employee_list, 'to_alipay_dict'):
params['add_employee_list'] = self.add_employee_list.to_alipay_dict()
else:
params['add_employee_list'] = self.add_employee_list
if self.add_employee_open_id_list:
if isinstance(self.add_employee_open_id_list, list):
for i in range(0, len(self.add_employee_open_id_list)):
element = self.add_employee_open_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.add_employee_open_id_list[i] = element.to_alipay_dict()
if hasattr(self.add_employee_open_id_list, 'to_alipay_dict'):
params['add_employee_open_id_list'] = self.add_employee_open_id_list.to_alipay_dict()
else:
params['add_employee_open_id_list'] = self.add_employee_open_id_list
if self.agreement_no:
if hasattr(self.agreement_no, 'to_alipay_dict'):
params['agreement_no'] = self.agreement_no.to_alipay_dict()
else:
params['agreement_no'] = self.agreement_no
if self.project_id:
if hasattr(self.project_id, 'to_alipay_dict'):
params['project_id'] = self.project_id.to_alipay_dict()
else:
params['project_id'] = self.project_id
if self.remove_employee_list:
if isinstance(self.remove_employee_list, list):
for i in range(0, len(self.remove_employee_list)):
element = self.remove_employee_list[i]
if hasattr(element, 'to_alipay_dict'):
self.remove_employee_list[i] = element.to_alipay_dict()
if hasattr(self.remove_employee_list, 'to_alipay_dict'):
params['remove_employee_list'] = self.remove_employee_list.to_alipay_dict()
else:
params['remove_employee_list'] = self.remove_employee_list
if self.remove_employee_open_id_list:
if isinstance(self.remove_employee_open_id_list, list):
for i in range(0, len(self.remove_employee_open_id_list)):
element = self.remove_employee_open_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.remove_employee_open_id_list[i] = element.to_alipay_dict()
if hasattr(self.remove_employee_open_id_list, 'to_alipay_dict'):
params['remove_employee_open_id_list'] = self.remove_employee_open_id_list.to_alipay_dict()
else:
params['remove_employee_open_id_list'] = self.remove_employee_open_id_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel()
if 'account_id' in d:
o.account_id = d['account_id']
if 'add_employee_list' in d:
o.add_employee_list = d['add_employee_list']
if 'add_employee_open_id_list' in d:
o.add_employee_open_id_list = d['add_employee_open_id_list']
if 'agreement_no' in d:
o.agreement_no = d['agreement_no']
if 'project_id' in d:
o.project_id = d['project_id']
if 'remove_employee_list' in d:
o.remove_employee_list = d['remove_employee_list']
if 'remove_employee_open_id_list' in d:
o.remove_employee_open_id_list = d['remove_employee_open_id_list']
return o
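# --- Editor's illustrative sketch (not part of the Alipay SDK) ---
# Round-trip example for the model above: populate a few fields, serialize
# with to_alipay_dict(), and rebuild with from_alipay_dict(). All field
# values are hypothetical placeholders.
def _example_round_trip():
    model = AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel()
    model.account_id = '2088123456789012'
    model.project_id = 'PRJ-001'
    model.add_employee_list = ['emp-001', 'emp-002']
    params = model.to_alipay_dict()  # plain dict representation used when building the request
    restored = AlipayEbppInvoiceExpenserulesProjectemployeeModifyModel.from_alipay_dict(params)
    return params, restored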
|
14d9e0008a0f5df18719f31d506d720e21bc8cd2
|
cf018ce220ee461704e7fedf842dbf074025da54
|
/pytest/test_002_personalize_reset.py
|
dfacdeba0a3ad740342c02f0f53581c74cfb5078
|
[] |
no_license
|
solokeys/openpgp
|
fdc0096b2f294746959519e5858aa9eec841ecf7
|
5e5080f2b258b11921573dab87167d69cebef00d
|
refs/heads/master
| 2021-06-26T05:53:10.663925
| 2021-06-14T16:11:17
| 2021-06-14T16:11:17
| 187,276,756
| 122
| 11
| null | 2021-06-25T01:30:28
| 2019-05-17T20:20:30
|
C
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
test_002_personalize_reset.py
|
from card_test_personalize_reset import *
|
21970d73f6360a5b8bf598a885f89029545cb6be
|
76f23cc69dc10c44bc7cf00b78e37db04c7a9c45
|
/datalad/distribution/utils.py
|
3ef3ca4be07d45047f5f232e57ad4f83572eb244
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
datalad/datalad
|
2d9c247344d340325ba84e7ab674ac320e57f30c
|
40332b5ad25bf8744f7399f6c3575f7d28f71384
|
refs/heads/maint
| 2023-09-04T11:03:02.264714
| 2023-08-10T15:56:19
| 2023-08-10T15:56:19
| 14,052,034
| 453
| 134
|
NOASSERTION
| 2023-09-14T19:10:18
| 2013-11-01T19:40:08
|
Python
|
UTF-8
|
Python
| false
| false
| 5,157
|
py
|
utils.py
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Distribution utility functions
"""
import logging
from os.path import (
isabs,
join as opj,
normpath,
)
import posixpath
from datalad.log import log_progress
from datalad.support.annexrepo import AnnexRepo
from datalad.support.network import (
PathRI,
RI,
URL,
)
lgr = logging.getLogger('datalad.distribution.utils')
def _get_flexible_source_candidates(src, base_url=None, alternate_suffix=True):
"""Get candidates to try cloning from.
Primarily to mitigate the problem that git doesn't append /.git
while cloning from non-bare repos over the dumb protocol (http*). Also to
simplify creation of URLs whenever a base URL and a relative path within it
are provided
Parameters
----------
src : string or RI
Full or relative (then considered within base_url if provided) path
base_url : string or RI, optional
alternate_suffix : bool
Whether to generate URL candidates with and without '/.git' suffixes.
Returns
-------
candidates : list of str
List of RIs (path, url, ssh targets) to try to install from
"""
candidates = []
ri = RI(src)
if isinstance(ri, PathRI) and not isabs(ri.path) and base_url:
ri = RI(base_url)
if ri.path.endswith('/.git'):
base_path = ri.path[:-5]
base_suffix = '.git'
else:
base_path = ri.path
base_suffix = ''
if isinstance(ri, PathRI):
# this is a path, so stay native
ri.path = normpath(opj(base_path, src, base_suffix))
else:
# we are handling a URL, use POSIX path conventions
ri.path = posixpath.normpath(
posixpath.join(base_path, src, base_suffix))
src = str(ri)
candidates.append(src)
if alternate_suffix and isinstance(ri, URL):
if ri.scheme in {'http', 'https'}:
# additionally try to consider .git:
if not src.rstrip('/').endswith('/.git'):
candidates.append(
'{0}/.git'.format(src.rstrip('/')))
return candidates
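# --- Editor's illustrative note (not part of datalad) ---
# Rough examples of the candidate expansion above; the URLs are hypothetical
# and the expected lists reflect a reading of the code, not tested output:
#
#   _get_flexible_source_candidates('https://example.com/repo')
#       -> ['https://example.com/repo', 'https://example.com/repo/.git']
#   _get_flexible_source_candidates('sub/ds',
#                                   base_url='https://example.com/super/.git')
#       -> ['https://example.com/super/sub/ds/.git']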
def _yield_ds_w_matching_siblings(
ds, names, recursive=False, recursion_limit=None):
"""(Recursively) inspect a dataset for siblings with particular name(s)
Parameters
----------
ds: Dataset
The dataset to be inspected.
names: iterable
Sibling names (str) to test for.
recursive: bool, optional
Whether to recurse into subdatasets.
recursion_limit: int, optional
Recursion depth limit.
Yields
------
str, str
Path to the dataset with a matching sibling, and name of the matching
sibling in that dataset.
"""
def _discover_all_remotes(ds, refds, **kwargs):
"""Helper to be run on all relevant datasets via foreach
"""
# Note that `siblings` doesn't tell us about special remotes that are
# not enabled. There could still be conflicting names we need to know
# about in order to properly deal with the `existing` switch.
repo = ds.repo
# list of known git remotes
if isinstance(repo, AnnexRepo):
remotes = repo.get_remotes(exclude_special_remotes=True)
remotes.extend([v['name']
for k, v in repo.get_special_remotes().items()]
)
else:
remotes = repo.get_remotes()
return remotes
if not recursive:
for name in _discover_all_remotes(ds, ds):
if name in names:
yield ds.path, name
return
# in recursive mode this check could take a substantial amount of
# time: employ a progress bar (or rather a counter, because we don't
# know the total in advance)
pbar_id = 'check-siblings-{}'.format(id(ds))
log_progress(
lgr.info, pbar_id,
'Start checking pre-existing sibling configuration %s', ds,
label='Query siblings',
unit=' Siblings',
)
for res in ds.foreach_dataset(
_discover_all_remotes,
recursive=recursive,
recursion_limit=recursion_limit,
return_type='generator',
result_renderer='disabled',
):
# unwind result generator
if 'result' in res:
for name in res['result']:
log_progress(
lgr.info, pbar_id,
'Discovered sibling %s in dataset at %s',
name, res['path'],
update=1,
increment=True)
if name in names:
yield res['path'], name
log_progress(
lgr.info, pbar_id,
'Finished checking pre-existing sibling configuration %s', ds,
)
|
d10638a463dd7c2fae7aadc93c0cecc9fe58ba42
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cognitiveservices/azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/operations/_train_operations.py
|
c48ed5feb317af90e53f8867af314c322597823d
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,694
|
py
|
_train_operations.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class TrainOperations(object):
"""TrainOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def train_version(
self, app_id, version_id, custom_headers=None, raw=False, **operation_config):
"""Sends a training request for a version of a specified LUIS app. This
POST request initiates the training asynchronously. To determine whether
the training request is successful, submit a GET request to get
training status. Note: The application version is not fully trained
unless all the models (intents and entities) are trained successfully
or are up to date. To verify training success, get the training status
at least once after training is complete.
:param app_id: The application ID.
:type app_id: str
:param version_id: The version ID.
:type version_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EnqueueTrainingResponse or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.EnqueueTrainingResponse
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.train_version.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str'),
'versionId': self._serialize.url("version_id", version_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('EnqueueTrainingResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
train_version.metadata = {'url': '/apps/{appId}/versions/{versionId}/train'}
def get_status(
self, app_id, version_id, custom_headers=None, raw=False, **operation_config):
"""Gets the training status of all models (intents and entities) for the
specified LUIS app. You must call the train API to train the LUIS app
before you call this API to get training status. "appId" specifies the
LUIS app ID. "versionId" specifies the version number of the LUIS app.
For example, "0.1".
:param app_id: The application ID.
:type app_id: str
:param version_id: The version ID.
:type version_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.cognitiveservices.language.luis.authoring.models.ModelTrainingInfo]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_status.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str'),
'versionId': self._serialize.url("version_id", version_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[ModelTrainingInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_status.metadata = {'url': '/apps/{appId}/versions/{versionId}/train'}
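# --- Editor's illustrative sketch (not part of the generated SDK) ---
# The docstrings above describe an asynchronous flow: enqueue training with
# train_version(), then poll get_status() until every model has finished.
# The helper below sketches that loop; the status strings follow the
# ModelTrainingDetails model as understood here and should be treated as an
# assumption.
def _example_wait_for_training(train_ops, app_id, version_id, poll_seconds=2.0):
    """Illustrative only: enqueue training and poll until all models finish."""
    import time
    train_ops.train_version(app_id, version_id)
    while True:
        infos = train_ops.get_status(app_id, version_id)
        statuses = [info.details.status for info in infos]
        if any(status == 'Fail' for status in statuses):
            raise RuntimeError('Training failed for at least one model')
        if all(status in ('Success', 'UpToDate') for status in statuses):
            return infos
        time.sleep(poll_seconds)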
|
6037bac294374fa4c504c6c302ff2aa597a3dcae
|
7af0ff378525ef6132f74bac0b1eb54ce4c40c08
|
/indico/modules/events/registration/forms.py
|
8c0077cd1f93856de8b0fb21499fabeff0af9186
|
[
"MIT"
] |
permissive
|
indico/indico
|
1126ee0ac3e9d36510a64989ce71be9c02680831
|
463951511d3a8409f944f98f29875c4323f3e897
|
refs/heads/master
| 2023-08-31T11:15:00.092526
| 2023-08-30T11:07:25
| 2023-08-30T11:07:25
| 2,113,067
| 1,549
| 429
|
MIT
| 2023-09-13T20:09:56
| 2011-07-27T13:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 34,465
|
py
|
forms.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import time, timedelta
from operator import itemgetter
import jsonschema
from flask import request
from wtforms.fields import (BooleanField, DecimalField, EmailField, FloatField, HiddenField, IntegerField, SelectField,
StringField, TextAreaField)
from wtforms.validators import DataRequired, Email, InputRequired, NumberRange, Optional, ValidationError
from wtforms.widgets import NumberInput
from indico.core import signals
from indico.core.config import config
from indico.core.db import db
from indico.modules.designer import PageLayout, PageOrientation, PageSize, TemplateType
from indico.modules.designer.util import get_default_ticket_on_category, get_inherited_templates
from indico.modules.events.features.util import is_feature_enabled
from indico.modules.events.payment import payment_settings
from indico.modules.events.registration.models.forms import ModificationMode
from indico.modules.events.registration.models.invitations import RegistrationInvitation
from indico.modules.events.registration.models.items import RegistrationFormItem
from indico.modules.events.registration.models.registrations import PublishRegistrationsMode, Registration
from indico.modules.events.registration.models.tags import RegistrationTag
from indico.util.i18n import _
from indico.util.placeholders import get_missing_placeholders, render_placeholder_info
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import EmailListField, FileField, IndicoDateTimeField, IndicoEnumSelectField, JSONField
from indico.web.forms.fields.colors import SUIColorPickerField
from indico.web.forms.fields.datetime import TimeDeltaField
from indico.web.forms.fields.principals import PrincipalListField
from indico.web.forms.fields.simple import (HiddenFieldList, IndicoEmailRecipientsField, IndicoMultipleTagSelectField,
IndicoParticipantVisibilityField)
from indico.web.forms.validators import HiddenUnless, IndicoEmail, LinkedDateTime
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
def _check_if_payment_required(form, field):
if not field.data:
return
if not is_feature_enabled(form.event, 'payment'):
raise ValidationError(_('You have to enable the payment feature in order to set a registration fee.'))
class RegistrationFormEditForm(IndicoForm):
_price_fields = ('currency', 'base_price')
_registrant_notification_fields = ('notification_sender_address', 'message_pending', 'message_unpaid',
'message_complete', 'attach_ical')
_manager_notification_fields = ('manager_notifications_enabled', 'manager_notification_recipients')
_special_fields = _price_fields + _registrant_notification_fields + _manager_notification_fields
title = StringField(_('Title'), [DataRequired()], description=_('The title of the registration form'))
introduction = TextAreaField(_('Introduction'),
description=_('Introduction to be displayed when filling out the registration form'))
contact_info = StringField(_('Contact info'),
description=_('How registrants can get in touch with somebody for extra information'))
moderation_enabled = BooleanField(_('Moderated'), widget=SwitchWidget(),
description=_('If enabled, registrations require manager approval'))
require_login = BooleanField(_('Only logged-in users'), widget=SwitchWidget(),
description=_('Users must be logged in to register'))
require_user = BooleanField(_('Registrant must have account'), widget=SwitchWidget(),
description=_('Registration emails must be associated with an Indico account'))
require_captcha = BooleanField(_('Require CAPTCHA'), widget=SwitchWidget(),
description=_('When registering, users with no account have to answer a CAPTCHA'))
limit_registrations = BooleanField(_('Limit registrations'), widget=SwitchWidget(),
description=_('Whether there is a limit of registrations'))
registration_limit = IntegerField(_('Capacity'), [HiddenUnless('limit_registrations'), DataRequired(),
NumberRange(min=1)],
description=_('Maximum number of registrations'))
modification_mode = IndicoEnumSelectField(_('Modification allowed'), enum=ModificationMode,
description=_('Will users be able to modify their data? When?'))
publish_registration_count = BooleanField(_('Publish number of registrations'), widget=SwitchWidget(),
description=_('Number of registered participants will be displayed on '
'the event page'))
publish_checkin_enabled = BooleanField(_('Publish check-in status'), widget=SwitchWidget(),
description=_('Check-in status will be shown publicly on the event page'))
base_price = DecimalField(_('Registration fee'), [NumberRange(min=0, max=999999.99), Optional(),
_check_if_payment_required], filters=[lambda x: x if x is not None else 0],
widget=NumberInput(step='0.01'),
description=_('A fixed fee all users have to pay when registering.'))
currency = SelectField(_('Currency'), [DataRequired()], description=_('The currency for new registrations'))
notification_sender_address = StringField(_('Notification sender address'), [IndicoEmail()],
filters=[lambda x: (x or None)])
message_pending = TextAreaField(
_('Message for pending registrations'),
description=_('Text included in emails sent to pending registrations (Markdown syntax)')
)
message_unpaid = TextAreaField(
_('Message for unpaid registrations'),
description=_('Text included in emails sent to unpaid registrations (Markdown syntax)')
)
message_complete = TextAreaField(
_('Message for complete registrations'),
description=_('Text included in emails sent to complete registrations (Markdown syntax)')
)
attach_ical = BooleanField(
_('Attach iCalendar file'),
widget=SwitchWidget(),
description=_('Attach an iCalendar file to the mail sent once a registration is complete')
)
manager_notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
description=_('Enable notifications to managers about registrations'))
manager_notification_recipients = EmailListField(_('List of recipients'),
[HiddenUnless('manager_notifications_enabled',
preserve_data=True), DataRequired()],
description=_('Email addresses that will receive notifications'))
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.regform = kwargs.pop('regform', None)
super().__init__(*args, **kwargs)
self._set_currencies()
self.notification_sender_address.description = _('Email address set as the sender of all '
'notifications sent to users. If empty, '
'then {email} is used.').format(email=config.NO_REPLY_EMAIL)
def _set_currencies(self):
currencies = [(c['code'], '{0[code]} ({0[name]})'.format(c)) for c in payment_settings.get('currencies')]
self.currency.choices = sorted(currencies, key=lambda x: x[1].lower())
class RegistrationFormCreateForm(IndicoForm):
_meeting_fields = ('visibility', 'retention_period') # The meeting regform has a default title
_conference_fields = ('title', 'visibility', 'retention_period')
title = StringField(_('Title'), [DataRequired()], description=_('The title of the registration form'))
visibility = IndicoParticipantVisibilityField(_('Participant list visibility'),
description=_('Specify under which conditions the participant list '
'will be visible to other participants and everyone '
'else who can access the event'))
retention_period = TimeDeltaField(_('Retention period'), units=('weeks',),
description=_('Specify for how many weeks the registration '
'data, including the participant list, should be stored. '
'Retention periods for individual fields can be set in the '
'registration form designer'),
render_kw={'placeholder': _('Indefinite')})
def validate_visibility(self, field):
participant_visibility, public_visibility = (PublishRegistrationsMode[v] for v in field.data[:-1])
if participant_visibility.value < public_visibility.value:
raise ValidationError(_('Participant visibility cannot be more restrictive for other participants than '
'for the public'))
if field.data[2] is not None:
visibility_duration = timedelta(weeks=field.data[2])
if visibility_duration <= timedelta():
raise ValidationError(_('The visibility duration cannot be zero.'))
elif visibility_duration > timedelta(days=3650):
raise ValidationError(_('The visibility duration cannot be longer than 10 years. Leave the field empty '
'for indefinite.'))
def validate_retention_period(self, field):
retention_period = field.data
if retention_period is None:
return
elif retention_period <= timedelta():
raise ValidationError(_('The retention period cannot be zero or negative.'))
elif retention_period > timedelta(days=3650):
raise ValidationError(_('The retention period cannot be longer than 10 years. Leave the field empty for '
'indefinite.'))
visibility_duration = (timedelta(weeks=self.visibility.data[2]) if self.visibility.data[2] is not None
else None)
if visibility_duration and visibility_duration > retention_period:
raise ValidationError(_('The retention period cannot be lower than the visibility duration.'))
class RegistrationFormScheduleForm(IndicoForm):
start_dt = IndicoDateTimeField(_('Start'), [Optional()], default_time=time(0, 0),
description=_('Moment when registrations will be open'))
end_dt = IndicoDateTimeField(_('End'), [Optional(), LinkedDateTime('start_dt')], default_time=time(23, 59),
description=_('Moment when registrations will be closed'))
modification_end_dt = IndicoDateTimeField(_('Modification deadline'), [Optional(), LinkedDateTime('end_dt')],
default_time=time(23, 59),
description=_('Deadline until which registration information can be '
'modified (defaults to the end date if empty)'))
def __init__(self, *args, **kwargs):
regform = kwargs.pop('regform')
self.timezone = regform.event.timezone
super().__init__(*args, **kwargs)
class InvitationFormBase(IndicoForm):
_invitation_fields = ('skip_moderation',)
_email_fields = ('email_from', 'email_subject', 'email_body')
email_from = SelectField(_('From'), [DataRequired()])
email_subject = StringField(_('Email subject'), [DataRequired()])
email_body = TextAreaField(_('Email body'), [DataRequired()], widget=CKEditorWidget())
skip_moderation = BooleanField(_('Skip moderation'), widget=SwitchWidget(),
description=_("If enabled, the user's registration will be approved automatically."))
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
event = self.regform.event
super().__init__(*args, **kwargs)
if not self.regform.moderation_enabled:
del self.skip_moderation
self.email_from.choices = list(event.get_allowed_sender_emails().items())
self.email_body.description = render_placeholder_info('registration-invitation-email', invitation=None)
def validate_email_body(self, field):
missing = get_missing_placeholders('registration-invitation-email', field.data, invitation=None)
if missing:
raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
class InvitationFormNew(InvitationFormBase):
_invitation_fields = ('first_name', 'last_name', 'email', 'affiliation') + InvitationFormBase._invitation_fields
first_name = StringField(_('First name'), [DataRequired()],
description=_('The first name of the user you are inviting.'))
last_name = StringField(_('Last name'), [DataRequired()],
description=_('The last name of the user you are inviting.'))
email = EmailField(_('Email'), [DataRequired(), Email()], filters=[lambda x: x.lower() if x else x],
description=_('The invitation will be sent to this address.'))
affiliation = StringField(_('Affiliation'),
description=_('The affiliation of the user you are inviting.'))
@generated_data
def users(self):
return [{'first_name': self.first_name.data,
'last_name': self.last_name.data,
'email': self.email.data,
'affiliation': self.affiliation.data}]
def validate_email(self, field):
if RegistrationInvitation.query.filter_by(email=field.data).with_parent(self.regform).has_rows():
raise ValidationError(_('There is already an invitation with this email address.'))
if Registration.query.filter_by(email=field.data, is_active=True).with_parent(self.regform).has_rows():
raise ValidationError(_('There is already a registration with this email address.'))
class InvitationFormExisting(InvitationFormBase):
_invitation_fields = ('users_field',) + InvitationFormBase._invitation_fields
users_field = PrincipalListField(_('Users'), [DataRequired()], allow_external_users=True,
description=_('Select the users to invite.'))
@generated_data
def users(self):
return [{'first_name': x.first_name,
'last_name': x.last_name,
'email': x.email.lower(),
'affiliation': x.affiliation}
for x in self.users_field.data]
def validate_users_field(self, field):
emails = {x.email.lower() for x in field.data}
# invitations
existing = {x.email for x in self.regform.invitations} & emails
if existing:
raise ValidationError(_('There are already invitations for the following email addresses: {emails}')
.format(emails=', '.join(sorted(existing))))
# registrations
existing = {x.email for x in self.regform.registrations if x.is_active} & emails
if existing:
raise ValidationError(_('There are already registrations with the following email addresses: {emails}')
.format(emails=', '.join(sorted(existing))))
class ImportInvitationsForm(InvitationFormBase):
_invitation_fields = ('source_file', 'skip_existing') + InvitationFormBase._invitation_fields
source_file = FileField(_('Source File'), [DataRequired(_('You need to upload a CSV file.'))],
accepted_file_types='.csv')
skip_existing = BooleanField(_('Skip existing invitations'), widget=SwitchWidget(), default=False,
description=_('If enabled, users with existing invitations will be ignored.'))
class EmailRegistrantsForm(IndicoForm):
from_address = SelectField(_('From'), [DataRequired()])
cc_addresses = EmailListField(_('CC'),
description=_('Beware, addresses in this field will receive one mail per '
'registrant.'))
subject = StringField(_('Subject'), [DataRequired()])
body = TextAreaField(_('Email body'), [DataRequired()], widget=CKEditorWidget())
recipients = IndicoEmailRecipientsField(_('Recipients'))
copy_for_sender = BooleanField(_('Send copy to me'), widget=SwitchWidget(),
description=_('Send copy of each email to my mailbox'))
attach_ticket = BooleanField(_('Attach ticket'), widget=SwitchWidget(),
description=_('Attach tickets to emails'))
registration_id = HiddenFieldList()
submitted = HiddenField()
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
event = self.regform.event
super().__init__(*args, **kwargs)
self.from_address.choices = list(event.get_allowed_sender_emails().items())
self.body.description = render_placeholder_info('registration-email', regform=self.regform, registration=None)
def validate_body(self, field):
missing = get_missing_placeholders('registration-email', field.data, regform=self.regform, registration=None)
if missing:
raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
def is_submitted(self):
return super().is_submitted() and 'submitted' in request.form
class TicketsForm(IndicoForm):
tickets_enabled = BooleanField(_('Enable Tickets'), widget=SwitchWidget(),
description=_('Create tickets for registrations using this registration form.'))
ticket_on_email = BooleanField(_('Send with an e-mail'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Attach PDF ticket to the email sent to a user after completing '
'their registration.'))
ticket_on_event_page = BooleanField(_('Download from event homepage'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Allow users to download their ticket from the '
'conference homepage.'))
ticket_on_summary_page = BooleanField(_('Download from summary page'), [HiddenUnless('tickets_enabled',
preserve_data=True)],
widget=SwitchWidget(),
description=_('Allow users to download their ticket from the registration '
'summary page.'))
tickets_for_accompanying_persons = BooleanField(_('Tickets for accompanying persons'),
[HiddenUnless('tickets_enabled', preserve_data=True)],
widget=SwitchWidget(),
description=_("Create tickets for each of the user's accompanying "
'persons.'))
ticket_template_id = SelectField(_('Ticket template'), [HiddenUnless('tickets_enabled', preserve_data=True),
Optional()], coerce=int)
def __init__(self, *args, **kwargs):
event = kwargs.pop('event')
super().__init__(*args, **kwargs)
default_tpl = get_default_ticket_on_category(event.category)
all_templates = set(event.designer_templates) | get_inherited_templates(event)
badge_templates = [(tpl.id, tpl.title) for tpl in all_templates
if tpl.type == TemplateType.badge and tpl != default_tpl]
# Show the default template first
badge_templates.insert(0, (default_tpl.id, '{} ({})'.format(default_tpl.title, _('Default category template'))))
self.ticket_template_id.choices = badge_templates
class ParticipantsDisplayForm(IndicoForm):
"""Form to customize the display of the participant list."""
json = JSONField()
def __init__(self, *args, **kwargs):
self.regforms = kwargs.pop('regforms')
super().__init__(*args, **kwargs)
def validate_json(self, field):
schema = {
'type': 'object',
'properties': {
'merge_forms': {'type': 'boolean'},
'participant_list_forms': {
'type': 'array',
'items': {'type': 'integer'}
},
'participant_list_columns': {
'type': 'array',
'items': {'type': 'string'}
}
}
}
try:
jsonschema.validate(field.data, schema)
except jsonschema.ValidationError as exc:
raise ValidationError(str(exc))
class ParticipantsDisplayFormColumnsForm(IndicoForm):
"""
Form to customize the columns for a particular registration form
on the participant list.
"""
json = JSONField()
def validate_json(self, field):
schema = {
'type': 'object',
'properties': {
'columns': {
'type': 'array',
'items': {'type': 'integer'}
}
}
}
try:
jsonschema.validate(field.data, schema)
except jsonschema.ValidationError as exc:
raise ValidationError(str(exc))
class RegistrationManagersForm(IndicoForm):
"""
Form to manage users with privileges to modify registration-related items.
"""
managers = PrincipalListField(_('Registration managers'), allow_groups=True, allow_event_roles=True,
allow_category_roles=True, allow_emails=True, allow_external_users=True,
description=_('List of users allowed to modify registrations'),
event=lambda form: form.event)
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
super().__init__(*args, **kwargs)
class CreateMultipleRegistrationsForm(IndicoForm):
"""
Form to create multiple registrations of Indico users at the same time.
"""
user_principals = PrincipalListField(_('Indico users'), [DataRequired()], allow_external_users=True)
notify_users = BooleanField(_('Send e-mail notifications'),
default=True,
description=_('Notify the users about the registration.'),
widget=SwitchWidget())
def __init__(self, *args, **kwargs):
self._regform = kwargs.pop('regform')
open_add_user_dialog = kwargs.pop('open_add_user_dialog', False)
super().__init__(*args, **kwargs)
self.user_principals.open_immediately = open_add_user_dialog
def validate_user_principals(self, field):
for user in field.data:
if user in db.session and self._regform.get_registration(user=user):
raise ValidationError(_('A registration for {} already exists.').format(user.full_name))
elif self._regform.get_registration(email=user.email):
raise ValidationError(_('A registration for {} already exists.').format(user.email))
class BadgeSettingsForm(IndicoForm):
template = SelectField(_('Template'))
save_values = BooleanField(_('Save values for next time'), widget=SwitchWidget(),
description=_('Save these values in the event settings'))
dashed_border = BooleanField(_('Dashed border around each badge'), widget=SwitchWidget(),
description=_('Display a dashed border around each badge'))
page_size = IndicoEnumSelectField(_('Page size'), enum=PageSize, sorted=True)
page_orientation = IndicoEnumSelectField(_('Page orientation'), enum=PageOrientation)
page_layout = IndicoEnumSelectField(_('Page layout'), enum=PageLayout,
description=_('The single sided (foldable) option is only available if the '
'template orientation is the same as the page orientation and '
'its width is exactly half of the page width'))
top_margin = FloatField(_('Top margin'), [InputRequired()])
left_margin = FloatField(_('Left margin'), [InputRequired()])
right_margin = FloatField(_('Right margin'), [InputRequired()])
bottom_margin = FloatField(_('Bottom margin'), [InputRequired()])
margin_columns = FloatField(_('Margin between columns'), [InputRequired()])
margin_rows = FloatField(_('Margin between rows'), [InputRequired()])
submitted = HiddenField()
def __init__(self, event, **kwargs):
all_templates = set(event.designer_templates) | get_inherited_templates(event)
badge_templates = [tpl for tpl in all_templates if tpl.type.name == 'badge']
signals.event.filter_selectable_badges.send(type(self), badge_templates=badge_templates)
tickets = kwargs.pop('tickets')
super().__init__(**kwargs)
self.template.choices = sorted(((str(tpl.id), tpl.title)
for tpl in badge_templates
if tpl.is_ticket == tickets),
key=itemgetter(1))
def is_submitted(self):
return super().is_submitted() and 'submitted' in request.form
class ImportRegistrationsForm(IndicoForm):
source_file = FileField(_('Source File'), [DataRequired(_('You need to upload a CSV file.'))],
accepted_file_types='.csv')
skip_moderation = BooleanField(_('Skip Moderation'), widget=SwitchWidget(), default=True,
description=_('If enabled, the registration will be immediately accepted'))
notify_users = BooleanField(_('E-mail users'), widget=SwitchWidget(),
description=_('Whether the imported users should receive an e-mail notification'))
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
super().__init__(*args, **kwargs)
if not self.regform.moderation_enabled:
del self.skip_moderation
class RejectRegistrantsForm(IndicoForm):
rejection_reason = TextAreaField(_('Reason'), description=_('You can provide a reason for the rejection here.'))
attach_rejection_reason = BooleanField(_('Attach reason'), widget=SwitchWidget())
registration_id = HiddenFieldList()
submitted = HiddenField()
def is_submitted(self):
return super().is_submitted() and 'submitted' in request.form
class RegistrationTagForm(IndicoForm):
"""Form to create a new registration tag."""
title = StringField(_('Title'), [DataRequired()])
color = SUIColorPickerField(_('Color'), [DataRequired()])
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.tag = kwargs.pop('tag', None)
super().__init__(*args, **kwargs)
def validate_title(self, field):
query = RegistrationTag.query.with_parent(self.event).filter(
db.func.lower(RegistrationTag.title) == field.data.lower()
)
if self.tag:
query = query.filter(RegistrationTag.id != self.tag.id)
if query.has_rows():
raise ValidationError(_('This title is already in use.'))
class RegistrationTagsAssignForm(IndicoForm):
"""Form to assign registration tags to registrations."""
add = IndicoMultipleTagSelectField(_('Add'), description=_('Select tags to assign'))
remove = IndicoMultipleTagSelectField(_('Remove'), description=_('Select tags to remove'))
registration_id = HiddenFieldList()
submitted = HiddenField()
def validate_remove(self, field):
if set(self.remove.data) & set(self.add.data):
raise ValidationError(_('You cannot add and remove the same tag'))
validate_add = validate_remove
def is_submitted(self):
return super().is_submitted() and 'submitted' in request.form
class RegistrationPrivacyForm(IndicoForm):
"""Form to set the privacy settings of a registration form"""
visibility = IndicoParticipantVisibilityField(_('Participant list visibility'),
description=_('Specify under which conditions the participant list '
'will be visible to other participants and everyone '
'else who can access the event'))
retention_period = TimeDeltaField(_('Retention period'), units=('weeks',),
description=_('Specify for how many weeks the registration '
'data, including the participant list, should be stored. '
'Retention periods for individual fields can be set in the '
'registration form designer'),
render_kw={'placeholder': _('Indefinite')})
require_privacy_policy_agreement = BooleanField(_('Privacy policy'), widget=SwitchWidget(),
description=_('Specify whether users are required to agree to '
"the event's privacy policy when registering"))
def __init__(self, *args, **kwargs):
self.regform = kwargs.pop('regform')
super().__init__(*args, **kwargs)
def validate_visibility(self, field):
participant_visibility, public_visibility = (PublishRegistrationsMode[v] for v in field.data[:-1])
if participant_visibility.value < public_visibility.value:
raise ValidationError(_('Participant visibility cannot be more restrictive for other participants than '
'for the public'))
participant_visibility_changed_to_show_all = (
participant_visibility == PublishRegistrationsMode.show_all and
self.regform.publish_registrations_participants != PublishRegistrationsMode.show_all
)
public_visibility_changed_to_show_all = (
public_visibility == PublishRegistrationsMode.show_all and
self.regform.publish_registrations_public != PublishRegistrationsMode.show_all
)
if (
self.regform and
(participant_visibility_changed_to_show_all or public_visibility_changed_to_show_all) and
Registration.query.with_parent(self.regform).filter(~Registration.is_deleted,
~Registration.created_by_manager).has_rows()
):
raise ValidationError(_("'Show all participants' can only be set if there are no registered users."))
if field.data[2] is not None:
visibility_duration = timedelta(weeks=field.data[2])
if visibility_duration <= timedelta():
raise ValidationError(_('The visibility duration cannot be zero.'))
elif visibility_duration > timedelta(days=3650):
raise ValidationError(_('The visibility duration cannot be longer than 10 years. Leave the field empty '
'for indefinite.'))
def validate_retention_period(self, field):
retention_period = field.data
if retention_period is None:
return
elif retention_period <= timedelta():
raise ValidationError(_('The retention period cannot be zero or negative.'))
elif retention_period > timedelta(days=3650):
raise ValidationError(_('The retention period cannot be longer than 10 years. Leave the field empty for '
'indefinite.'))
visibility_duration = (timedelta(weeks=self.visibility.data[2]) if self.visibility.data[2] is not None
else None)
if visibility_duration and visibility_duration > retention_period:
raise ValidationError(_('The retention period cannot be lower than the visibility duration.'))
fields = (RegistrationFormItem.query
.with_parent(self.regform)
.filter(RegistrationFormItem.is_enabled,
~RegistrationFormItem.is_deleted,
RegistrationFormItem.retention_period.isnot(None),
RegistrationFormItem.retention_period > retention_period)
.all())
if fields:
raise ValidationError(_('The retention period of the whole form cannot be lower than '
'that of individual fields.'))
| 8aa087623b133486d16a6732fafadf5bf8870b20 | fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be | /olap/ByConity/tests/testflows/rbac/tests/privileges/show/show_columns.py | 108200e7a579652bf40cb9f3e7ab793710610427 | ["Apache-2.0", "BSD-3-Clause", "MIT"] | permissive | alldatacenter/alldata | 7bc7713c9f1d56ad6b8e59ea03206d1073b7e047 | 8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6 | refs/heads/master | 2023-08-05T07:32:25.442740 | 2023-08-03T13:17:24 | 2023-08-03T13:17:24 | 213,321,771 | 774 | 250 | Apache-2.0 | 2023-09-06T17:35:32 | 2019-10-07T07:36:18 | null | UTF-8 | Python | false | false | 8,318 | py | show_columns.py |
from testflows.core import *
from testflows.asserts import error
from rbac.requirements import *
from rbac.helper.common import *
import rbac.helper.errors as errors
@TestSuite
def describe_with_privilege_granted_directly(self, node=None):
"""Check that user is able to execute DESCRIBE on a table if and only if
they have SHOW COLUMNS privilege for that table granted directly.
"""
user_name = f"user_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"):
table_name = f"table_name_{getuid()}"
Suite(test=describe)(grant_target_name=user_name, user_name=user_name, table_name=table_name)
@TestSuite
def describe_with_privilege_granted_via_role(self, node=None):
"""Check that user is able to execute DESCRIBE on a table if and only if
they have SHOW COLUMNS privilege for that table granted through a role.
"""
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"), role(node, f"{role_name}"):
table_name = f"table_name_{getuid()}"
with When("I grant the role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
Suite(test=describe)(grant_target_name=role_name, user_name=user_name, table_name=table_name)
@TestSuite
@Requirements(
RQ_SRS_006_RBAC_DescribeTable_RequiredPrivilege("1.0"),
)
def describe(self, grant_target_name, user_name, table_name, node=None):
"""Check that user is able to execute DESCRIBE only when they have SHOW COLUMNS privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with table(node, table_name):
with Scenario("DESCRIBE table without privilege"):
with When("I grant the user NONE privilege"):
node.query(f"GRANT NONE TO {grant_target_name}")
with And("I grant the user USAGE privilege"):
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
with Then(f"I attempt to DESCRIBE {table_name}"):
node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("DESCRIBE with privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
with Then(f"I attempt to DESCRIBE {table_name}"):
node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)])
with Scenario("DESCRIBE with revoked privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
with And(f"I revoke SHOW COLUMNS on the table"):
node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}")
with Then(f"I attempt to DESCRIBE {table_name}"):
node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("DESCRIBE with revoked ALL privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
with And("I revoke ALL privilege"):
node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}")
with Then(f"I attempt to DESCRIBE {table_name}"):
node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("DESCRIBE with ALL privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT ALL ON *.* TO {grant_target_name}")
with Then(f"I attempt to DESCRIBE {table_name}"):
node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)])
@TestSuite
def show_create_with_privilege_granted_directly(self, node=None):
"""Check that user is able to execute SHOW CREATE on a table if and only if
they have SHOW COLUMNS privilege for that table granted directly.
"""
user_name = f"user_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"):
table_name = f"table_name_{getuid()}"
Suite(test=show_create)(grant_target_name=user_name, user_name=user_name, table_name=table_name)
@TestSuite
def show_create_with_privilege_granted_via_role(self, node=None):
"""Check that user is able to execute SHOW CREATE on a table if and only if
they have SHOW COLUMNS privilege for that table granted directly.
"""
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"), role(node, f"{role_name}"):
table_name = f"table_name_{getuid()}"
with When("I grant the role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
Suite(test=show_create)(grant_target_name=role_name, user_name=user_name, table_name=table_name)
@TestSuite
@Requirements(
RQ_SRS_006_RBAC_ShowCreateTable_RequiredPrivilege("1.0"),
)
def show_create(self, grant_target_name, user_name, table_name, node=None):
"""Check that user is able to execute SHOW CREATE on a table only when they have SHOW COLUMNS privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with table(node, table_name):
with Scenario("SHOW CREATE without privilege"):
with When("I grant the user NONE privilege"):
node.query(f"GRANT NONE TO {grant_target_name}")
with And("I grant the user USAGE privilege"):
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
with Then(f"I attempt to SHOW CREATE {table_name}"):
node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("SHOW CREATE with privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
with Then(f"I attempt to SHOW CREATE {table_name}"):
node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)])
with Scenario("SHOW CREATE with revoked privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
with And(f"I revoke SHOW COLUMNS on the table"):
node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}")
with Then(f"I attempt to SHOW CREATE {table_name}"):
node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("SHOW CREATE with ALL privilege"):
with When(f"I grant SHOW COLUMNS on the table"):
node.query(f"GRANT ALL ON *.* TO {grant_target_name}")
with Then(f"I attempt to SHOW CREATE {table_name}"):
node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)])
@TestFeature
@Name("show columns")
@Requirements(
RQ_SRS_006_RBAC_ShowColumns_Privilege("1.0"),
RQ_SRS_006_RBAC_Privileges_All("1.0"),
RQ_SRS_006_RBAC_Privileges_None("1.0")
)
def feature(self, node="clickhouse1"):
"""Check the RBAC functionality of SHOW COLUMNS.
"""
self.context.node = self.context.cluster.node(node)
Suite(run=describe_with_privilege_granted_directly, setup=instrument_clickhouse_server_log)
Suite(run=describe_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log)
Suite(run=show_create_with_privilege_granted_directly, setup=instrument_clickhouse_server_log)
Suite(run=show_create_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log)
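# Quick reference (added for clarity, not part of the original test file): the
# scenarios above boil down to checking that DESCRIBE / SHOW CREATE TABLE only
# succeed while a sufficient grant is in place, e.g.:
#
#   GRANT SHOW COLUMNS ON <table> TO <user or role>;    -- DESCRIBE now succeeds
#   REVOKE SHOW COLUMNS ON <table> FROM <user or role>; -- DESCRIBE fails again
#   GRANT ALL ON *.* TO <user or role>;                  -- also sufficient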
| c51a3725bdb42d1db6cec8329f80e9ec11898266 | b3950a2a6912c9b494d22b9353322c3357df0110 | /tock/api/views.py | d0de0c995cfb8cf199ea04baf49402f23b7005c4 | ["CC0-1.0", "LicenseRef-scancode-public-domain", "MIT"] | permissive | 18F/tock | df1fa5e817e690ce0bff315a15799e2f78915882 | 99005d8f6c4605a69fbb620c41f38447ecbee459 | refs/heads/main | 2023-08-31T01:34:55.299577 | 2023-08-23T18:49:10 | 2023-08-23T18:49:10 | 30,162,008 | 135 | 50 | NOASSERTION | 2023-09-07T18:40:30 | 2015-02-01T22:19:32 | Python | UTF-8 | Python | false | false | 16,038 | py | views.py |
import collections
import datetime
from django.contrib.auth import get_user_model
from django.db import connection
from django.db.models import Count, F
from django.utils.html import escape
from rest_framework import serializers, generics
from rest_framework.exceptions import ParseError
from hours.models import TimecardObject, Timecard, ReportingPeriod
from projects.models import Project
from employees.models import UserData
User = get_user_model()
# Serializers for different models
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = (
'id',
'client',
'name',
'description',
'billable',
'start_date',
'end_date',
'active',
'profit_loss_account',
'organization',
)
billable = serializers.BooleanField(source='accounting_code.billable')
profit_loss_account = serializers.CharField(source='profit_loss_account.name', allow_null=True)
client = serializers.StringRelatedField(source='accounting_code')
organization = serializers.StringRelatedField()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'id',
'username',
'first_name',
'last_name',
'email'
)
class UserDataSerializer(serializers.Serializer):
user = serializers.StringRelatedField()
current_employee = serializers.BooleanField()
is_18f_employee = serializers.BooleanField()
is_active = serializers.BooleanField()
is_billable = serializers.BooleanField()
unit = serializers.StringRelatedField()
organization = serializers.StringRelatedField()
def get_unit(self,obj):
return obj.unit
class ReportingPeriodSerializer(serializers.ModelSerializer):
class Meta:
model = ReportingPeriod
fields = (
'start_date',
'end_date',
'exact_working_hours',
'min_working_hours',
'max_working_hours',
)
class SubmissionSerializer(serializers.Serializer):
user = serializers.CharField(source='id')
username = serializers.CharField()
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.CharField()
on_time_submissions = serializers.CharField(source='tcount')
class TimecardSerializer(serializers.Serializer):
user = serializers.StringRelatedField(source='timecard.user')
project_id = serializers.CharField(source='project.id')
project_name = serializers.CharField(source='project.name')
profit_loss_account = serializers.CharField(
source='project.profit_loss_account.name',
allow_null=True
)
hours_spent = serializers.DecimalField(max_digits=5, decimal_places=2)
project_allocation = serializers.DecimalField(max_digits=6, decimal_places=3)
start_date = serializers.DateField(
source='timecard.reporting_period.start_date'
)
end_date = serializers.DateField(
source='timecard.reporting_period.end_date'
)
billable = serializers.BooleanField(
source='project.accounting_code.billable'
)
agency = serializers.CharField(
source='project.accounting_code.agency.name'
)
flat_rate = serializers.BooleanField(
source='project.accounting_code.flat_rate'
)
notes = serializers.CharField()
billable_expectation = serializers.CharField(
source='timecard.billable_expectation'
)
employee_organization = serializers.CharField(
source='timecard.user.user_data.organization_name'
)
project_organization = serializers.CharField(
source='project.organization_name'
)
grade = serializers.IntegerField(
source='grade.grade',
allow_null=True
)
class FullTimecardSerializer(serializers.ModelSerializer):
# Fields that require accessing other models
user_name = serializers.CharField(source='user.username')
reporting_period_start_date = serializers.DateField(source='reporting_period.start_date')
reporting_period_end_date = serializers.DateField(source='reporting_period.end_date')
class Meta:
model = Timecard
fields = [
# straight pass-through fields
'id',
'submitted',
'submitted_date',
'billable_expectation',
'target_hours',
'billable_hours',
'non_billable_hours',
'excluded_hours',
'utilization',
# fields from other models
'user_name',
'reporting_period_start_date',
'reporting_period_end_date',
]
# API Views
class UserDataView(generics.ListAPIView):
queryset = UserData.objects.all()
serializer_class = UserDataSerializer
class ProjectList(generics.ListAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
class ProjectInstanceView(generics.RetrieveAPIView):
""" Return the details of a specific project """
queryset = Project.objects.all()
model = Project
serializer_class = ProjectSerializer
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class ReportingPeriodList(generics.ListAPIView):
queryset = ReportingPeriod.objects.all()
serializer_class = ReportingPeriodSerializer
class ReportingPeriodAudit(generics.ListAPIView):
"""
Retrieves a list of users who have not filled out
their time cards for a given time period
"""
queryset = ReportingPeriod.objects.all()
serializer_class = UserSerializer
lookup_field = 'start_date'
def get_queryset(self):
reporting_period = self.queryset.get(
start_date=datetime.datetime.strptime(
self.kwargs['reporting_period_start_date'], "%Y-%m-%d"
).date()
)
filed_users = list(
Timecard.objects.filter(
reporting_period=reporting_period,
submitted=True
).distinct().all().values_list('user__id', flat=True))
return get_user_model().objects \
.exclude(user_data__start_date__gte=reporting_period.end_date) \
.exclude(id__in=filed_users) \
.filter(user_data__current_employee=True) \
.order_by('last_name', 'first_name')
class Submissions(generics.ListAPIView):
"""
Returns a list of users and the number of timecards they have
submitted on time since the requested reporting period
"""
serializer_class = SubmissionSerializer
def get_queryset(self):
rp_num = self.kwargs['num_past_reporting_periods']
reporting_period = list(ReportingPeriod.get_most_recent_periods(
number_of_periods=rp_num
))[-1]
# filter to punctually submitted timecards
# between the requested period and today
today = datetime.date.today()
timecards = Timecard.objects.filter(
reporting_period__end_date__lt=today,
reporting_period__end_date__gte=reporting_period.end_date,
submitted_date__lte=F('reporting_period__end_date')
)
return get_user_timecard_count(timecards)
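# Note (added for clarity): a timecard counts as "on time" when its
# submitted_date falls on or before the end of its reporting period. For
# example, a card for a period ending 2023-03-31 submitted on 2023-03-30 is
# counted, while one submitted on 2023-04-02 is not.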
class FullTimecardList(generics.ListAPIView):
serializer_class = FullTimecardSerializer
def get_queryset(self):
# Lookup the associated user and reporting_period in the original
# query since we'll be accessing them later. See https://docs.djangoproject.com/en/3.2/ref/models/querysets/#django.db.models.query.QuerySet.select_related
queryset = Timecard.objects.select_related(
'user',
'reporting_period',
)
return filter_timecards(queryset, self.request.query_params)
class TimecardList(generics.ListAPIView):
""" Endpoint for timecard data in csv or json """
# Eagerly load related rows to avoid n+1 selects
queryset = TimecardObject.objects.select_related(
'timecard__user',
'project__accounting_code__agency',
'timecard__reporting_period',
'grade',
).order_by(
'timecard__reporting_period__start_date'
)
serializer_class = TimecardSerializer
def get_queryset(self):
return get_timecardobjects(self.queryset, self.request.query_params)
def date_from_iso_format(date_str):
try:
return datetime.date.fromisoformat(date_str)
except ValueError:
raise ParseError(
detail='Invalid date format. Got {}, expected ISO format (YYYY-MM-DD)'.format(
escape(date_str)
)
)
def filter_timecards(queryset, params={}):
"""
Filter a queryset of timecards according to the provided query
string parameters.
* `date`: filter for reporting periods that contain this date
* `user`: either username or numeric id for a user
* `after`: the reporting period ends after the given date
* `before`: the reporting period starts before the given date
"""
submitted_param = params.get("submitted", "yes") # default to only submitted cards
submitted = (submitted_param != "no")
queryset = queryset.filter(submitted=submitted)
if not params:
return queryset
if 'date' in params:
reporting_date = date_from_iso_format(params.get('date'))
queryset = queryset.filter(
reporting_period__start_date__lte=reporting_date,
reporting_period__end_date__gte=reporting_date
)
if 'user' in params:
# allow either user name or ID
user = params.get('user')
if user.isnumeric():
queryset = queryset.filter(user__id=user)
else:
queryset = queryset.filter(user__username=user)
if 'after' in params:
# get everything after a specified date
after_date = date_from_iso_format(params.get('after'))
queryset = queryset.filter(
reporting_period__end_date__gte=after_date
)
if 'before' in params:
# get everything before a specified date
before_date = date_from_iso_format(params.get('before'))
queryset = queryset.filter(
reporting_period__start_date__lte=before_date
)
if 'org' in params:
# filter on organization, "0" to include all orgs, "None" for
# "organization IS NULL"
org_id = params.get('org')
if org_id.isnumeric() and org_id != "0": # 0 indicates all organizations, no filtering then
queryset = queryset.filter(user__user_data__organization__id=org_id)
elif org_id.lower() == "none": # the only allowable value that isn't numeric is None
queryset = queryset.filter(user__user_data__organization__isnull=True)
return queryset
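# Illustrative usage (hypothetical values, not part of the original module):
#
#   qs = filter_timecards(
#       Timecard.objects.select_related('user', 'reporting_period'),
#       {'user': 'jdoe', 'after': '2023-01-01', 'before': '2023-03-31'},
#   )
#
# returns submitted timecards for user "jdoe" whose reporting periods end on or
# after 2023-01-01 and start on or before 2023-03-31.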
def get_timecardobjects(queryset, params={}):
"""
Filter a TimecardObject queryset according to the provided GET
query string parameters:
* `project`: numeric id or name of a project
* `billable`: `True` or `False` to filter for projects that are or aren't billable
"""
# queryset as passed is a queryset of TimecardObjects. Get a queryset of
# the matching Timecards that we can filter...
timecard_queryset = Timecard.objects.filter(timecardobjects__in=queryset)
timecard_queryset = filter_timecards(timecard_queryset, params)
# and now sub-select the matching timecardobjects from our original
# queryset
queryset = queryset.filter(timecard__in=timecard_queryset)
if 'project' in params:
# allow either project name or ID
project = params.get('project')
if project.isnumeric():
queryset = queryset.filter(project__id=project)
else:
queryset = queryset.filter(project__name=project)
if 'billable' in params:
# only pull records for billable projects
billable = params.get('billable')
queryset = queryset.filter(
project__accounting_code__billable=billable
)
return queryset
def get_user_timecard_count(queryset):
"""
Get a list of users and the number of the timecards
from a queryset of timecards passed in
"""
timecard_ids = queryset.values_list('id', flat=True)
user_timecard_counts = User.objects.filter(
timecards__id__in=timecard_ids
).annotate(
tcount=Count('timecards')
)
return user_timecard_counts
from rest_framework.response import Response
from rest_framework.decorators import api_view
hours_by_quarter_query = '''
with agg as (
select
extract(year from rp.start_date) +
(extract(month from rp.start_date) / 10) as year,
(extract(month from rp.start_date) + 3 - 1)::int % 12 / 3 + 1 as quarter,
billable,
sum(hours_spent) as hours
from hours_timecardobject tco
join hours_timecard tc on tco.timecard_id = tc.id
join hours_reportingperiod rp on tc.reporting_period_id = rp.id
join projects_project pr on tco.project_id = pr.id
join projects_accountingcode ac on pr.accounting_code_id = ac.id
where tc.submitted = True
group by
year,
quarter,
billable
)
select
year,
quarter,
coalesce(max(case when billable then hours else null end), 0) as billable,
coalesce(max(case when not billable then hours else null end), 0) as nonbillable,
sum(hours) as total
from agg
group by
year,
quarter
'''
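# Note (added for clarity, assuming the intent is US-government fiscal quarters):
# the quarter expression maps reporting-period start months to quarters as
#   Oct/Nov/Dec -> 1, Jan/Feb/Mar -> 2, Apr/May/Jun -> 3, Jul/Aug/Sep -> 4,
# e.g. month 10: (10 + 3 - 1) % 12 / 3 + 1 = 12 % 12 / 3 + 1 = 0 + 1 = 1.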
HoursByQuarter = collections.namedtuple(
'HoursByQuarter',
['year', 'quarter', 'billable', 'nonbillable', 'total'],
)
class HoursByQuarterSerializer(serializers.Serializer):
year = serializers.IntegerField()
quarter = serializers.IntegerField()
billable = serializers.FloatField()
nonbillable = serializers.FloatField()
total = serializers.FloatField()
@api_view()
def hours_by_quarter(request, *args, **kwargs):
cursor = connection.cursor()
cursor.execute(hours_by_quarter_query)
rows = cursor.fetchall()
return Response([
HoursByQuarterSerializer(HoursByQuarter(*each)).data
for each in rows
])
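# Illustrative response shape (hypothetical numbers, added for clarity): each
# fetched row is serialized as
#   {"year": 2023, "quarter": 2, "billable": 1200.0, "nonbillable": 340.5, "total": 1540.5}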
hours_by_quarter_by_user_query = '''
with agg as (
select
extract(year from rp.start_date) +
(extract(month from rp.start_date) / 10) as year,
(extract(month from rp.start_date) + 3 - 1)::int % 12 / 3 + 1 as quarter,
username,
billable,
sum(hours_spent) as hours
from hours_timecardobject tco
join hours_timecard tc on tco.timecard_id = tc.id
join hours_reportingperiod rp on tc.reporting_period_id = rp.id
join auth_user usr on tc.user_id = usr.id
join projects_project pr on tco.project_id = pr.id
join projects_accountingcode ac on pr.accounting_code_id = ac.id
where tc.submitted = True
group by
year,
quarter,
username,
billable
)
select
year,
quarter,
username,
coalesce(max(case when billable then hours else null end), 0) as billable,
coalesce(max(case when not billable then hours else null end), 0) as nonbillable,
sum(hours) as total
from agg
group by
year,
quarter,
username
'''
HoursByQuarterByUser = collections.namedtuple(
'HoursByQuarter',
['year', 'quarter', 'username', 'billable', 'nonbillable', 'total'],
)
class HoursByQuarterByUserSerializer(serializers.Serializer):
year = serializers.IntegerField()
quarter = serializers.IntegerField()
username = serializers.CharField()
billable = serializers.FloatField()
nonbillable = serializers.FloatField()
total = serializers.FloatField()
@api_view()
def hours_by_quarter_by_user(request, *args, **kwargs):
cursor = connection.cursor()
cursor.execute(hours_by_quarter_by_user_query)
rows = cursor.fetchall()
return Response([
HoursByQuarterByUserSerializer(HoursByQuarterByUser(*each)).data
for each in rows
])
| 9dbd47b0d3e4a50976e7d61b0b3995d499e0fc69 | 03666e5f961946fc1a0ac67781ac1425562ef0d7 | /src/visitpy/visit_flow/flow/src/core/workspace.py | 128dc7b241d906cc27ebb15cbe362b3e25ed690d | ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | visit-dav/visit | e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e | 601ae46e0bef2e18425b482a755d03490ade0493 | refs/heads/develop | 2023-09-06T08:19:38.397058 | 2023-09-05T21:29:32 | 2023-09-05T21:29:32 | 165,565,988 | 335 | 120 | BSD-3-Clause | 2023-09-14T00:53:37 | 2019-01-13T23:27:26 | C | UTF-8 | Python | false | false | 18,192 | py | workspace.py |
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: workspace.py
author: Cyrus Harrison <cyrush@llnl.gov>
created: 10/14/2010
description:
Workspace data flow abstractions.
"""
import os
import sys
import hashlib
import imp
import traceback
from .registry import *
from .filter_graph import *
from .state_control import *
from ..parser import *
from . import log
# logging helper for workspace
def info(msg):
log.info(msg,"workspace")
def define_module(module_name,module_script,parent_dict=None):
if module_name in sys.modules:
module = sys.modules[module_name]
else:
module = imp.new_module(module_name)
sys.modules[module_name] = module
module.__file__ = "<%s>" % module_name
module.__loader = None
exec(module_script,module.__dict__,module.__dict__)
if not parent_dict is None:
parent_dict[module_name] = __import__(module_name)
return module
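# Illustrative usage (hypothetical module name and script, not part of the
# original file): build a throwaway module from source text and use it.
#
#   mod = define_module("flow_scratch", "x = 41\ndef bump(v):\n    return v + 1\n")
#   mod.bump(mod.x)   # -> 42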
class Context(object):
"""
Base class for contexts.
"""
context_type = "Context"
default_params = {}
def __init__(self, workspace, name, params = None, parent = None):
self.__workspace = workspace
self.name = name
self.params = PropertyTree(init=self.default_params)
self.parent = parent
if not params is None:
self.params.update(PropertyTree(init=params))
def add_context(self,context_type,context_name, params = None):
"""
Adds a context to the workspace.
This instance is the parent of the new context.
"""
return self.__workspace.add_context(context_type,context_name,params,parent=self)
def add_filter(self,filter_type,filter_name=None,params=None):
"""
Adds a node to the workspace.
This instance is used as the context of the new node.
"""
return self.__workspace.add_filter(filter_type,filter_name,params,self)
def has_filter(self,filter_name):
return self.__workspace.has_filter(filter_name)
def filter_names(self):
return self.__workspace.filter_names()
def add_registry_source(self,
entry_key,
obj,
uref=-1,
filter_type=None,
filter_name=None,
context=None):
"""
Adds a data object to the registry and creates a source node for this
data object.
This instance is used as the context of the new node.
"""
return self.__workspace.add_registry_source(entry_key,obj,uref,
filter_type,filter_name,
self)
def connect(self,src_name,des_port):
"""
Connects filter nodes in the workspace.
Convenience method for interacting with contexts.
"""
return self.__workspace.connect(src_name,des_port,self)
def remove_filter(self,filter_name):
"""
Removes a filter node from the workspace.
Convenience method for interacting with contexts.
"""
return self.__workspace.remove_filter(filter_name)
def registry_add(self,key,obj,uref=-1):
"""
Adds an entry to the workspace registry.
Convenience method for interacting with contexts.
"""
return self.__workspace.registry.add_entry(key,obj,uref)
def registry_fetch(self,key):
"""
Fetches an entry from the workspace registry.
Registry reference count is not changed.
"""
return self.__workspace.registry.fetch_entry(key,direct_fetch=True)
def registry_keys(self):
"""
Returns a list of keys of the active entries in the workspace's
registry.
"""
return self.__workspace.registry_keys()
def parent_context(self,context_type=None,context_name=None):
"""
Fetches a parent context with a given name or type.
ctx.parent_context(context_name="root")
ctx.parent_context(context_type="<default_context>")
"""
if self.parent is None or (context_type is None and context_name is None):
return self.parent
elif not context_name is None:
if self.parent.name == context_name:
return self.parent
else:
return self.parent.parent_context(context_name=context_name)
elif not context_type is None:
if self.parent.context_type == context_type:
return self.parent
else:
return self.parent.parent_context(context_type=context_type)
return None
def parameters(self):
return self.params
@classmethod
def default_parameters(cls):
if isinstance(cls.default_params,PropertyTree):
return cls.default_params.properties()
else:
return dict(cls.default_params)
def __getitem__(self,path):
"""
Fetches an entry from the params PropertyTree.
"""
return self.params[path]
def __setitem__(self,path,obj):
"""
Sets an entry in the params PropertyTree.
"""
self.params[path] = obj
def __str__(self):
"""
String pretty print.
"""
return "%s:[%s]" % (self.name, self.context_type)
class Workspace(object):
"""
Main data flow container abstraction.
"""
def __init__(self):
self.graph = FilterGraph()
self.registry = Registry()
self.context_types = {}
self.contexts = {}
self.contexts["<default_context>"] = Context(self,"<default_context>")
self.register_filter(RegistrySource)
def register_filter_module(self,filter_module):
"""
Registers a set of filters and contexts exposed in a filter module.
Registers Filter subclasses in a list named `filters'.
Registers Context subclasses in a list named `contexts'.
"""
mdir = dir(filter_module)
if "filters" in mdir:
for f in filter_module.filters:
self.register_filter(f)
if "contexts" in mdir:
for ctx in filter_module.contexts:
self.register_context(ctx)
def register_filters(self,filters):
"""
Helper
"""
if "filters" in dir(filters):
self.register_filter_module(filters)
else:
for f in filters:
self.register_filter(f)
def register_context(self,context):
"""
Registers a new Context subclass for use.
"""
self.context_types[context.context_type] = context
def register_filter(self,filter_class):
"""
Registers a new Filter subclass for use.
"""
self.graph.register_filter(filter_class)
def add_context(self,context_type,context_name,params=None,parent=None):
"""
Adds a context to the workspace.
"""
if context_type in list(self.context_types.keys()):
ccls = self.context_types[context_type]
res = ccls(self,context_name,params,parent)
self.contexts[context_name] = res
return res
else:
raise UnregisteredContextError(context_type)
def add_filter(self,filter_type,name=None,params=None,context=None):
"""
Adds a filter node instance to the workspace.
"""
if context is None:
context = self.get_context("<default_context>")
return self.graph.add_node(filter_type,name,params,context)
def add_registry_source(self,
entry_key,
obj,uref=-1,
filter_type=None,
filter_name=None,
context=None):
"""
Adds a data object to the registry and creates a source node for this
data object.
"""
self.registry_add(entry_key,obj,uref)
if filter_type is None:
filter_type = "<registry_source>"
if filter_name is None:
filter_name = entry_key
self.add_filter(filter_type,filter_name,context=context)
def connect(self,src_name,des_port,context=None):
"""
Connects filter nodes in the workspace.
"""
# check for a reg source
reg_src = src_name.startswith(":")
if reg_src and not self.has_filter(src_name):
# assume data is in the registry & auto add a reg source.
info("Adding automatic registry source = %s" % src_name)
self.add_filter("<registry_source>",src_name,context=context)
if isinstance(des_port,str):
des,port = des_port.split(":")
else: # tuple or list
des,port = des_port
self.graph.connect(src_name,des,port)
def remove_filter(self,filter_name):
"""
Removes the filter node with the given name from the workspace.
"""
return self.graph.filter_name(filter_name)
def has_filter(self,filter_name):
"""
Returns True if a filter node with the given name exists in
the workspace.
"""
return self.graph.has_node(filter_name)
def filter_names(self):
"""
Returns the names of the active filter nodes in the workspace.
"""
return list(self.graph.nodes.keys())
def has_context(self,context_name):
"""
Returns True if a context with the given name exists in
the workspace.
"""
return context_name in list(self.contexts.keys())
def get_context(self,context_name):
"""
Returns the context with the given name, or None if it does not exist.
"""
if context_name in list(self.contexts.keys()):
return self.contexts[context_name]
return None
def get_filter(self,filter_name):
"""
Returns the filter node with the given name from the workspace.
"""
return self.graph.get_node(filter_name)
def registry_add(self,entry_key,obj,uref=-1):
"""
Adds an entry to the workspace's registry.
"""
return self.registry.add_entry(entry_key,obj,uref)
def registry_fetch(self,entry_key):
"""
Fetches an entry from the workspace's registry.
"""
return self.registry.fetch_entry(entry_key)
def registry_clear(self):
"""
Clears all entries from the workspace's registry.
"""
return self.registry.clear()
def registry_keys(self):
"""
Returns a list of keys of the active entries in the workspace's
registry.
"""
return list(self.registry.keys())
def execution_plan(self):
"""
Generates a workspace execution plan.
"""
return ExecutionPlan(self.graph)
def execute(self,states=None):
"""
Executes a flow workspace for a given set of states.
TODO:MORE INFO
"""
if states is None:
states = StateVector(0,[0])
plan = self.execution_plan()
if isinstance(states,StateVector):
return self.__execute_single(plan,states)
elif isinstance(states,list) or isinstance(states,StateVectorGenerator):
for svec in states:
self.__execute_single(plan,svec)
def __execute_single(self,plan,svec):
"""
Helper used to execute a flow workspace for a single state vector.
"""
info("Execute single: StateVector = %s" % str(svec))
rval = None
tidx = 0
for t in plan.traversals:
info("Execute Traversal %d" % tidx)
for v in t:
# get filter node & # of refs
node_name, uref = v
node = self.graph.nodes[node_name]
try:
# get inputs from registry
inputs = {}
msg = "Execute: %s" % node_name
for port_name in node.input_ports:
src_name = self.graph.edges_in[node_name][port_name]
entry_key = str(svec) + ":" + src_name
msg += " (%s:%s)" % (port_name,entry_key)
data = self.registry.fetch_entry(entry_key)
inputs[port_name] = data
node.set_inputs(inputs)
node.set_state_vector(svec)
info(msg)
res = node.execute()
except Exception as e:
msg = "Execute Error: %s" % node_name
exc_type, exc_value, exc_traceback = sys.exc_info()
emsg = traceback.format_exception(exc_type,
exc_value,
exc_traceback)
emsg = "".join(emsg)
info(msg)
info("\n<Traceback>\n" + emsg)
print(msg)
print("\n<Traceback>\n" + emsg)
raise e
# if output exists, place in registry
if not res is None:
entry_key = str(svec) + ":" + node.name
self.registry.add_entry(entry_key,res,uref)
rval= res
tidx += 1
return res
def setup_expression_network(self,txt,ctx=None):
"""
Uses the expression parser to setup the workspace from a user
expression.
"""
if ctx is None:
ctx = self.get_context("<default_context>")
Generator.parse_network(txt,ctx)
@classmethod
def load_workspace_script(cls,src=None,file=None):
"""
Helper used to load a workspace from a python script.
(Legacy Path)
"""
if src is None and not file is None:
info("Loading workspace from: %s" % os.path.abspath(file))
src = open(file).read()
module_name = hashlib.md5(src.encode("utf-8")).hexdigest()
res = define_module(module_name,src)
# setup the workspace
w = res.setup_workspace()
return w
def to_dict(self):
res = {"context_types":{},
"contexts":{}}
for k,v in list(self.context_types.items()):
res["context_types"][k] = {"default_params":dict(v.default_parameters())}
for k,v in list(self.contexts.items()):
ctx = {"type":v.context_type,
"params": v.parameters().properties(),
"parent": None}
if not v.parent is None:
ctx["parent"] = v.parent.name
res["contexts"][k] = ctx
graph_res = self.graph.to_dict()
res.update(graph_res)
return res
def load_dict(self,wdict):
# for now assume the filters and contexts are installed
# just create and hook up the filters
for node_name, node in list(wdict["nodes"].items()):
params = None
ctx = None
if "params" in node:
params = node["params"]
if "context" in node:
ctx = self.get_context(node["context"])
self.add_filter(node["type"],node_name,params,ctx)
for edge in wdict["connections"]:
self.connect(edge["from"],[edge["to"],edge["port"]])
class ExecutionPlan(object):
"""
Workspace execution plan.
Provides info about graph traversals that is used to execute
a data flow network.
"""
def __init__(self,g):
self.traversals = []
self.untouched = []
# find src & sink nodes
snks = []
srcs = []
for node in list(g.nodes.values()):
if not node.output_port or len(g.edges_out[node.name]) == 0:
snks.append(node.name)
if node.output_port and not node.name in list(g.edges_in.keys()):
srcs.append(node.name)
tags = {}
for name in list(g.nodes.keys()):
tags[name] = 0
# execute bf traversals from each snk
for snk_name in snks:
trav = []
self.__visit(g,snk_name,tags,trav)
if len(trav) > 0:
self.traversals.append(trav)
self.untouched = []
for name, tag in list(tags.items()):
if tag == 0:
self.untouched.append(name)
def __visit(self,g,node_name,tag,trav):
"""
Traversal visitor for graph topo-sort.
"""
if tag[node_name] != 0 : return
uref = 1
tag[node_name] = 1
node = g.nodes[node_name]
if node.output_port:
uref = max(1,len(g.edges_out[node_name]))
if node.number_of_input_ports() > 0:
for src_name in list(g.edges_in[node_name].values()):
if not src_name is None:
self.__visit(g,src_name,tag,trav)
else: # dangle?
uref = 0
if uref > 0:
trav.append((node_name, uref))
def __str__(self):
"""
String pretty print.
"""
ntrav = 0
res = "Execution Plan:\n# of Traversals = %d\n" % len(self.traversals)
for trav in self.traversals:
res += "\n Traversal %d:\n" % ntrav
for node_name,uref in trav:
res += " %s (%d)\n" %(node_name,uref)
res += "\n"
ntrav +=1
nut = 0
res += "# of Untouched Filter Nodes = %d\n" % len(self.untouched)
if len(self.untouched) > 0:
res += " Untouched Filter Nodes:\n"
for node_name in self.untouched:
res += " %s\n" %(node_name)
return res
| ba3589413d92c82682d3682bcc91df552ff73644 | 5ef6c8d47864f471e26b9902d61f8c687e941f05 | /src/genie/libs/parser/nxos/tests/ShowIpOspfInterface/cli/equal/golden_output_3_expected.py | cdda27c542ddfcc6853d4ccd8d817a1f9d4ab162 | ["Apache-2.0"] | permissive | CiscoTestAutomation/genieparser | 169c196558f1c1a0f0d10650876096f993224917 | b531eff760b2e44cd69d7a2716db6f866907c239 | refs/heads/master | 2023-09-03T08:56:18.831340 | 2023-08-29T22:32:02 | 2023-08-29T22:32:02 | 131,621,824 | 247 | 409 | Apache-2.0 | 2023-08-29T22:32:04 | 2018-04-30T16:51:50 | Python | UTF-8 | Python | false | false | 14,731 | py | golden_output_3_expected.py |
expected_output = {
'vrf': {
'GENIE-CORE': {
'address_family': {
'ipv4': {
'instance': {
'1000': {
'areas': {
'0.0.0.1': {
'interfaces': {
'Ethernet1/2': {
'bfd': {
'enable': False
},
'cost': 20,
'dead_interval': 6,
'enable': True,
'hello_interval': 2,
'hello_timer': '00:00:01',
'if_cfg': True,
'index': 1,
'interface_type': 'p2p',
'ip_address': '10.100.31.27/31',
'line_protocol': 'up',
'name': 'Ethernet1/2',
'passive': False,
'retransmit_interval': 5,
'state': 'p2p',
'statistics': {
'link_scope_lsa_cksum_sum': 0,
'link_scope_lsa_count': 0,
'num_nbrs_adjacent': 1,
'num_nbrs_flooding': 1,
'total_neighbors': 1
},
'transmit_delay': 1,
'wait_interval': 6
},
'Vlan959': {
'bfd': {
'enable': False
},
'cost': 10,
'dead_interval': 6,
'enable': True,
'hello_interval': 2,
'hello_timer': '00:00:00',
'if_cfg': True,
'index': 4,
'interface_type': 'p2p',
'ip_address': '10.100.31.217/30',
'line_protocol': 'up',
'name': 'Vlan959',
'passive': False,
'retransmit_interval': 5,
'state': 'p2p',
'statistics': {
'link_scope_lsa_cksum_sum': 0,
'link_scope_lsa_count': 0,
'num_nbrs_adjacent': 1,
'num_nbrs_flooding': 1,
'total_neighbors': 1
},
'transmit_delay': 1,
'wait_interval': 6
},
'loopback110': {
'bfd': {
'enable': False
},
'cost': 1,
'enable': True,
'if_cfg': True,
'index': 3,
'interface_type': 'loopback',
'ip_address': '10.100.0.13/32',
'line_protocol': 'up',
'name': 'loopback110',
'state': 'loopback'
},
'port-channel1001': {
'bfd': {
'enable': True
},
'cost': 10,
'dead_interval': 6,
'enable': True,
'hello_interval': 2,
'hello_timer': '00:00:01',
'if_cfg': True,
'index': 5,
'interface_type': 'p2p',
'ip_address': '10.100.31.197/30',
'line_protocol': 'up',
'name': 'port-channel1001',
'passive': False,
'retransmit_interval': 5,
'state': 'p2p',
'statistics': {
'link_scope_lsa_cksum_sum': 0,
'link_scope_lsa_count': 0,
'num_nbrs_adjacent': 1,
'num_nbrs_flooding': 1,
'total_neighbors': 1
},
'transmit_delay': 1,
'wait_interval': 6
}
}
}
}
}
}
}
}
},
'default': {
'address_family': {
'ipv4': {
'instance': {
'2000': {
'areas': {
'0.0.0.1': {
'interfaces': {
'Ethernet1/31': {
'bfd': {
'enable': False
},
'cost': 100,
'dead_interval': 6,
'enable': True,
'hello_interval': 2,
'hello_timer': '00:00:01',
'if_cfg': True,
'index': 3,
'interface_type': 'p2p',
'ip_address': '10.100.31.252/31',
'line_protocol': 'up',
'name': 'Ethernet1/31',
'passive': False,
'retransmit_interval': 5,
'state': 'p2p',
'statistics': {
'num_nbrs_adjacent': 1,
'num_nbrs_flooding': 1,
'total_neighbors': 1
},
'transmit_delay': 1,
'wait_interval': 6
},
'Ethernet1/45': {
'bfd': {
'enable': False
},
'cost': 100,
'dead_interval': 40,
'enable': True,
'hello_interval': 10,
'if_cfg': False,
'index': 1,
'interface_type': 'p2p',
'ip_address': '10.111.3.2/30',
'line_protocol': 'down',
'name': 'Ethernet1/45',
'passive': False,
'retransmit_interval': 5,
'state': 'down',
'statistics': {
'link_scope_lsa_cksum_sum': 0,
'link_scope_lsa_count': 0,
'num_nbrs_adjacent': 0,
'num_nbrs_flooding': 0,
'total_neighbors': 0
},
'transmit_delay': 1,
'wait_interval': 40
},
'Vlan3030': {
'bfd': {
'enable': False
},
'cost': 1000,
'enable': True,
'if_cfg': True,
'index': 118,
'interface_type': 'broadcast',
'ip_address': '10.115.128.4/24',
'line_protocol': 'up',
'name': 'Vlan3030',
'passive': True,
'state': 'dr'
},
'Vlan986': {
'bfd': {
'enable': False
},
'cost': 1000,
'enable': True,
'if_cfg': True,
'index': 122,
'interface_type': 'broadcast',
'ip_address': '10.100.17.51/29',
'line_protocol': 'up',
'name': 'Vlan986',
'passive': True,
'state': 'dr'
},
'Vlan997': {
'bfd': {
'enable': False
},
'cost': 10,
'dead_interval': 40,
'enable': True,
'hello_interval': 10,
'hello_timer': '00:00:04',
'if_cfg': True,
'index': 137,
'interface_type': 'p2p',
'ip_address': '10.100.17.81/30',
'line_protocol': 'up',
'name': 'Vlan997',
'passive': False,
'retransmit_interval': 5,
'state': 'p2p',
'statistics': {
'num_nbrs_adjacent': 1,
'num_nbrs_flooding': 1,
'total_neighbors': 1
},
'transmit_delay': 1,
'wait_interval': 40
},
'loopback100': {
'bfd': {
'enable': False
},
'cost': 1,
'enable': True,
'if_cfg': True,
'index': 50,
'interface_type': 'loopback',
'ip_address': '10.100.0.11/32',
'line_protocol': 'up',
'name': 'loopback100',
'state': 'loopback'
}
}
}
}
}
}
}
}
}
}
}
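# Illustrative sketch, not part of the original golden output: the fixture above is
# keyed vrf -> address_family -> instance -> area -> interface, so a consumer can
# drill down to a single parsed value, e.g. the OSPF cost of Ethernet1/2.
_eth2 = (expected_output['vrf']['GENIE-CORE']['address_family']['ipv4']
         ['instance']['1000']['areas']['0.0.0.1']['interfaces']['Ethernet1/2'])
assert _eth2['cost'] == 20 and _eth2['interface_type'] == 'p2p'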
|
036c688eddb01d27c564952dbe2c69f64091d52a
|
364774e29ef2474552ea3839de0951e63cbae0a6
|
/wouso/games/quest/forms.py
|
6748188558afd6d7e60c9bbafb45255518e13b7c
|
[
"Apache-2.0"
] |
permissive
|
rosedu/wouso
|
66c50ef750cf79d6959768f7df93cc08607cc266
|
ed34c62ac925db719388f27fe5acb40376d8d0c1
|
refs/heads/master
| 2022-10-29T14:28:51.818073
| 2022-09-24T18:54:04
| 2022-09-24T18:54:04
| 2,965,476
| 121
| 97
|
NOASSERTION
| 2019-11-15T09:33:50
| 2011-12-12T16:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
forms.py
|
from django.forms import CharField, Form, ModelForm, TextInput
from models import Quest
from bootstrap3_datetime import widgets
class QuestForm(Form):
answer = CharField(max_length=4000, widget=TextInput)
class QuestCpanel(ModelForm):
class Meta:
model = Quest
widgets = {
'start': widgets.DateTimePicker(options={"format": "YYYY-MM-DD HH:mm:ss"}),
'end': widgets.DateTimePicker(options={"format": "YYYY-MM-DD HH:mm:ss"})
}
exclude = ('order', 'registered',)
|
2ad2da1d46da54c851d85763fa75baf8fa67d757
|
de187dda5faed88c256d743dc2b15cc25294878d
|
/newton-4.00/applications/toolsAndWrapers/newtonPy/newtonWorld.py
|
26e6ccb89b2bde0464a80e092bb9a6272de082f2
|
[
"Zlib"
] |
permissive
|
MADEAPPS/newton-dynamics
|
77cb9db982f205559bd23b6053281726ea6cf3d5
|
7e8a2e09a44a04a24e2553e203cf50fd8e9dea05
|
refs/heads/master
| 2023-08-30T20:45:08.566223
| 2023-08-30T16:47:29
| 2023-08-30T16:47:29
| 21,649,756
| 1,145
| 248
|
NOASSERTION
| 2023-06-03T16:24:56
| 2014-07-09T11:21:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,931
|
py
|
newtonWorld.py
|
# Copyright (c) <2003-2021> <Newton Game Dynamics>
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely
import bpy
import newton
#newtonWorld = newton.ndWorld()
newtonWorld = newton.NewtonWorld()
def NewtonStart(scene):
fps = scene.render.fps / scene.render.fps_base
#timestep = 1.0/fps
#print("nominal time step ", timestep)
newtonWorld.SetSubSteps(1.0/fps)
def NewtonUpdate(scene):
fps = scene.render.fps / scene.render.fps_base
#timestep = 1.0/fps
#print("Frame Change ", scene.frame_current, " timestep ", timestep)
newtonWorld.Update (1.0/fps)
bpy.app.handlers.depsgraph_update_pre.append(NewtonStart)
bpy.app.handlers.frame_change_pre.append(NewtonUpdate)
class NewtonWorldProperties(bpy.types.PropertyGroup):
solverNominalFps: bpy.props.FloatProperty(name= "solver fix fps", description="solve fix frames per seconds", default = 120, min=60, max=600)
solverIterations: bpy.props.IntProperty(name= "solver iterations", description="Set the number of solver iterations per step", default = 4, min=4, max=16)
#my_float_vector : bpy.props.FloatVectorProperty(name= "Scale", soft_min= 0, soft_max= 1000, default= (1,1,1))
#
#my_enum: bpy.props.EnumProperty(
# name= "Enumerator / Dropdown",
# description= "sample text",
# items= [('OP1', "Add Cube", ""),
# ('OP2', "Add Sphere", ""),
# ('OP3', "Add Suzanne", "")
# ]
#)
#class NewtonWorldCreateHomeObject(bpy.types.Operator):
# """Creates a newton world home"""
# bl_label = 'create newton world'
# bl_idname = 'view3d.newton_world_create_home'
# bl_description = "create newton world"
#
# def execute(self, context):
# scene = context.scene
# selectedObjec = context.active_object
# bpy.ops.mesh.primitive_cube_add(size=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
# if selectedObjec == context.active_object:
# print ('change to [object mode] operator canceled')
# return {'CANCELLED'}
#
# context.active_object.name = 'newtonHome'
# world = NewtonWorld(context.active_object)
# scene.newton_world = world
# return {'FINISHED'}
#class NewtonWorldCreate(bpy.types.Operator):
# """Creates a newton world"""
# bl_label = 'create newton world'
# bl_idname = 'view3d.newton_world_create'
# bl_description = "create a newton world"
#
# def execute(self, context):
# scene = context.scene
# world = NewtonWorld(context.active_object)
# scene.newton_world = world
# return {'FINISHED'}
#class NewtonWorldDestroy(bpy.types.Operator):
# """Destroy a newton world"""
# bl_label = 'delete newton world'
# bl_idname = 'view3d.newton_world_destroy'
# bl_description = "destroy a newton world"
#
# def execute(self, context):
# scene = context.scene
#
# scene.newton_world.name = 'newtonHome'
# scene.newton_world = None
# return {'FINISHED'}
class NewtonWorldSetProperty(bpy.types.Operator):
"""newton world set engine properties"""
bl_label = 'newton world set property'
bl_idname = 'view3d.newton_world_set_property'
bl_description = "newton world set property"
def execute(self, context):
scene = context.scene
propertyGroup = scene.newton_world_properties
# set all solve properties
#newtonWorld.SetSubSteps(propertyGroup.solverSubSteps)
newtonWorld.SetTimestep(1.0 / propertyGroup.solverNominalFps)
newtonWorld.SetIterations(propertyGroup.solverIterations)
return {'FINISHED'}
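# Illustrative sketch, not part of the original add-on: a typical Blender add-on still
# has to register the property group and operator above and attach the group to the
# Scene; the attribute name 'newton_world_properties' is assumed from
# NewtonWorldSetProperty.execute().
def register():
    bpy.utils.register_class(NewtonWorldProperties)
    bpy.utils.register_class(NewtonWorldSetProperty)
    bpy.types.Scene.newton_world_properties = bpy.props.PointerProperty(type=NewtonWorldProperties)
def unregister():
    del bpy.types.Scene.newton_world_properties
    bpy.utils.unregister_class(NewtonWorldSetProperty)
    bpy.utils.unregister_class(NewtonWorldProperties)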
|
8dcd0d522bdcad3a319fce5e9439f281339b73da
|
362196f32e8248e025cb2f6cf0b88f812c9a059c
|
/juriscraper/opinions/united_states/state/nc.py
|
473c637650a1a104375b9f00b517621a5a958997
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
freelawproject/juriscraper
|
0fea8d4bb512808cb1e036aaaf819e9cc0847a6b
|
d2c6672696e13e33ec9981a1901b87047d8108c5
|
refs/heads/main
| 2023-08-09T13:27:21.357915
| 2023-07-06T22:33:01
| 2023-07-06T22:33:01
| 22,757,589
| 283
| 97
|
BSD-2-Clause
| 2023-09-08T22:59:36
| 2014-08-08T12:50:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,813
|
py
|
nc.py
|
"""Scraper for North Carolina Supreme Court
CourtID: nc
Court Short Name: N.C.
Reviewer:
History:
2014-05-01: Created by Brian Carver
2014-08-04: Rewritten by Jon Andersen with complete backscraper
"""
import re
import traceback
from datetime import date, datetime
from lxml import html
from juriscraper.lib.exceptions import InsanityException
from juriscraper.OpinionSite import OpinionSite
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = (
"http://appellate.nccourts.org/opinions/?c=sc&year=%s"
% date.today().year
)
self.back_scrape_iterable = list(
range((date.today().year - 1), 1997, -1)
)
self.my_download_urls = []
self.my_case_names = []
self.my_docket_numbers = []
self.my_summaries = []
self.my_neutral_citations = []
self.my_precedential_statuses = []
def _get_case_dates(self):
case_dates = []
case_date = None
precedential_status = "Published"
date_cleaner = r"\d+ \w+ [12][90]\d\d"
path = "//table//tr"
for row_el in self.html.xpath(path):
# Examine each row. If it contains the date, we set that as
# the current date. If it contains a case, we parse it.
try:
date_nodes = row_el.xpath(".//strong/text()")
date_str = date_nodes[0]
if date_nodes:
date_str = re.search(
date_cleaner, date_str, re.MULTILINE
).group()
case_date = datetime.strptime(date_str, "%d %B %Y").date()
# When a new date header appears, switch to Precedential
precedential_status = "Published"
continue # Row contained just the date, move on
except IndexError:
# No matching nodes; not a date header
pass
path = "./td[contains(., 'Unpublished Opinions - Rule 30e')]"
if row_el.xpath(path):
precedential_status = "Unpublished"
# When this header appears, switch to Nonprecedential, then
# press on to the following rows.
continue
if precedential_status == "Published":
urls = row_el.xpath("./td/span/span[1]/@onclick")
# Like: viewOpinion("http://appellate.nccourts.org/opinions/?c=1&pdf=31511")
if len(urls) != 1 or urls[0].find("viewOpinion") != 0:
continue # Only interested in cases with a download link
# Pull the URL out of the javascript viewOpinion function.
download_url = re.search(
r'viewopinion\("(.*)"', urls[0], re.IGNORECASE
).group(1)
path = "./td/span/span[contains(@class,'title')]"
txt = html.tostring(
row_el.xpath(path)[0], method="text", encoding="unicode"
)
case_name, neutral_cite, docket_number = self.parse_title(txt)
summary = ""
path = "./td/span/span[contains(@class,'desc')]/text()"
summaries = row_el.xpath(path)
try:
summary = summaries[0]
except IndexError:
# Not all cases have a summary
pass
if case_name.strip() == "":
continue # A few cases are missing a name
case_dates.append(case_date)
self.my_download_urls.append(download_url)
self.my_case_names.append(case_name)
self.my_docket_numbers.append(docket_number)
self.my_summaries.append(summary)
self.my_neutral_citations.append(neutral_cite)
self.my_precedential_statuses.append(precedential_status)
elif precedential_status == "Unpublished":
for span in row_el.xpath("./td/span"):
if "onclick" not in span.attrib:
continue
download_url = re.search(
r'viewopinion\("(.*)"',
span.attrib["onclick"],
re.IGNORECASE,
).group(1)
txt = span.text_content().strip()
(
case_name,
neutral_cite,
docket_number,
) = self.parse_title(txt)
if case_name.strip() == "":
continue # A few cases are missing a name
case_dates.append(case_date)
self.my_download_urls.append(download_url)
self.my_case_names.append(case_name)
self.my_docket_numbers.append(docket_number)
self.my_summaries.append("")
self.my_neutral_citations.append(neutral_cite)
self.my_precedential_statuses.append(precedential_status)
return case_dates
# Parses case titles like:
# Fields v. Harnett Cnty., 367 NC 12 (13-761)
# Clark v. Clark, (13-612)
@staticmethod
def parse_title(txt):
try:
name_and_citation = txt.rsplit("(", 1)[0].strip()
docket_number = (
re.search(r"(.*\d).*?", txt.rsplit("(", 1)[1]).group(0).strip()
)
case_name = name_and_citation.rsplit(",", 1)[0].strip()
try:
neutral_cite = name_and_citation.rsplit(",", 1)[1].strip()
if not re.search(r"^\d\d.*\d\d$", neutral_cite):
neutral_cite = ""
except IndexError:
# Unable to find comma to split on. No neutral cite.
neutral_cite = ""
except:
raise InsanityException(
f"Unable to parse: {txt}\n{traceback.format_exc()}"
)
return case_name, neutral_cite, docket_number
def _get_download_urls(self):
return self.my_download_urls
def _get_case_names(self):
return self.my_case_names
def _get_docket_numbers(self):
return self.my_docket_numbers
def _get_summaries(self):
return self.my_summaries
def _get_citations(self):
return self.my_neutral_citations
def _get_precedential_statuses(self):
return self.my_precedential_statuses
def _download_backwards(self, year):
self.url = f"http://appellate.nccourts.org/opinions/?c=sc&year={year}"
self.html = self._download()
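# Illustrative sketch, not part of the scraper: parse_title() splits the two title
# formats shown in the comment above into (case_name, neutral_cite, docket_number).
if __name__ == "__main__":
    print(Site.parse_title("Fields v. Harnett Cnty., 367 NC 12 (13-761)"))
    # -> ('Fields v. Harnett Cnty.', '367 NC 12', '13-761')
    print(Site.parse_title("Clark v. Clark, (13-612)"))
    # -> ('Clark v. Clark', '', '13-612')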
|
5b0118114f43dfff9864fed56a0de1e5434ef0c5
|
4e83833ce426ec867bc26252f7aa1d9dd940092f
|
/pfrl/experiments/train_agent_batch.py
|
add7cda81d597f2a93ab8fed51bdf038974c4078
|
[
"MIT"
] |
permissive
|
pfnet/pfrl
|
66803adbaeb33bf7a71b4dbaa17b76f5cc21fd1b
|
b29533b77c82f88fbc074714274ee8aa63dc270f
|
refs/heads/master
| 2023-07-25T23:47:01.208427
| 2023-07-16T15:16:10
| 2023-07-16T15:16:10
| 274,629,316
| 1,115
| 167
|
MIT
| 2023-07-16T15:16:11
| 2020-06-24T09:31:50
|
Python
|
UTF-8
|
Python
| false
| false
| 9,144
|
py
|
train_agent_batch.py
|
import logging
import os
from collections import deque
import numpy as np
from pfrl.experiments.evaluator import Evaluator, save_agent
def train_agent_batch(
agent,
env,
steps,
outdir,
checkpoint_freq=None,
log_interval=None,
max_episode_len=None,
step_offset=0,
evaluator=None,
successful_score=None,
step_hooks=(),
return_window_size=100,
logger=None,
):
"""Train an agent in a batch environment.
Args:
agent: Agent to train.
env: Environment to train the agent against.
steps (int): Number of total time steps for training.
outdir (str): Path to the directory to output things.
checkpoint_freq (int): frequency at which agents are stored.
log_interval (int): Interval of logging.
max_episode_len (int): Maximum episode length.
step_offset (int): Time step from which training starts.
return_window_size (int): Number of training episodes used to estimate
the average returns of the current agent.
        successful_score (float): Finish training if the mean score is greater
            than or equal to this value, if not None.
step_hooks (Sequence): Sequence of callable objects that accepts
(env, agent, step) as arguments. They are called every step.
See pfrl.experiments.hooks.
logger (logging.Logger): Logger used in this function.
Returns:
List of evaluation episode stats dict.
"""
logger = logger or logging.getLogger(__name__)
recent_returns = deque(maxlen=return_window_size)
num_envs = env.num_envs
episode_r = np.zeros(num_envs, dtype=np.float64)
episode_idx = np.zeros(num_envs, dtype="i")
episode_len = np.zeros(num_envs, dtype="i")
# o_0, r_0
obss = env.reset()
t = step_offset
if hasattr(agent, "t"):
agent.t = step_offset
eval_stats_history = [] # List of evaluation episode stats dict
try:
while True:
# a_t
actions = agent.batch_act(obss)
# o_{t+1}, r_{t+1}
obss, rs, dones, infos = env.step(actions)
episode_r += rs
episode_len += 1
# Compute mask for done and reset
if max_episode_len is None:
resets = np.zeros(num_envs, dtype=bool)
else:
resets = episode_len == max_episode_len
resets = np.logical_or(
resets, [info.get("needs_reset", False) for info in infos]
)
# Agent observes the consequences
agent.batch_observe(obss, rs, dones, resets)
# Make mask. 0 if done/reset, 1 if pass
end = np.logical_or(resets, dones)
not_end = np.logical_not(end)
# For episodes that ends, do the following:
# 1. increment the episode count
# 2. record the return
# 3. clear the record of rewards
# 4. clear the record of the number of steps
# 5. reset the env to start a new episode
# 3-5 are skipped when training is already finished.
episode_idx += end
recent_returns.extend(episode_r[end])
for _ in range(num_envs):
t += 1
if checkpoint_freq and t % checkpoint_freq == 0:
save_agent(agent, t, outdir, logger, suffix="_checkpoint")
for hook in step_hooks:
hook(env, agent, t)
if (
log_interval is not None
and t >= log_interval
and t % log_interval < num_envs
):
logger.info(
"outdir:{} step:{} episode:{} last_R: {} average_R:{}".format( # NOQA
outdir,
t,
np.sum(episode_idx),
recent_returns[-1] if recent_returns else np.nan,
np.mean(recent_returns) if recent_returns else np.nan,
)
)
logger.info("statistics: {}".format(agent.get_statistics()))
if evaluator:
eval_score = evaluator.evaluate_if_necessary(
t=t, episodes=np.sum(episode_idx)
)
if eval_score is not None:
eval_stats = dict(agent.get_statistics())
eval_stats["eval_score"] = eval_score
eval_stats_history.append(eval_stats)
if (
successful_score is not None
and evaluator.max_score >= successful_score
):
break
if t >= steps:
break
# Start new episodes if needed
episode_r[end] = 0
episode_len[end] = 0
obss = env.reset(not_end)
except (Exception, KeyboardInterrupt):
# Save the current model before being killed
save_agent(agent, t, outdir, logger, suffix="_except")
env.close()
if evaluator:
evaluator.env.close()
raise
else:
# Save the final model
save_agent(agent, t, outdir, logger, suffix="_finish")
return eval_stats_history
def train_agent_batch_with_evaluation(
agent,
env,
steps,
eval_n_steps,
eval_n_episodes,
eval_interval,
outdir,
checkpoint_freq=None,
max_episode_len=None,
step_offset=0,
eval_max_episode_len=None,
return_window_size=100,
eval_env=None,
log_interval=None,
successful_score=None,
step_hooks=(),
evaluation_hooks=(),
save_best_so_far_agent=True,
use_tensorboard=False,
logger=None,
):
"""Train an agent while regularly evaluating it.
Args:
agent: Agent to train.
        env: Environment to train the agent against.
steps (int): Number of total time steps for training.
eval_n_steps (int): Number of timesteps at each evaluation phase.
        eval_n_episodes (int): Number of episodes run at each evaluation phase.
eval_interval (int): Interval of evaluation.
outdir (str): Path to the directory to output things.
log_interval (int): Interval of logging.
checkpoint_freq (int): frequency with which to store networks
max_episode_len (int): Maximum episode length.
step_offset (int): Time step from which training starts.
return_window_size (int): Number of training episodes used to estimate
the average returns of the current agent.
eval_max_episode_len (int or None): Maximum episode length of
evaluation runs. If set to None, max_episode_len is used instead.
eval_env: Environment used for evaluation.
        successful_score (float): Finish training if the mean score is greater
            than or equal to this value, if not None.
step_hooks (Sequence): Sequence of callable objects that accepts
(env, agent, step) as arguments. They are called every step.
See pfrl.experiments.hooks.
evaluation_hooks (Sequence): Sequence of
pfrl.experiments.evaluation_hooks.EvaluationHook objects. They are
called after each evaluation.
save_best_so_far_agent (bool): If set to True, after each evaluation,
if the score (= mean return of evaluation episodes) exceeds
the best-so-far score, the current agent is saved.
use_tensorboard (bool): Additionally log eval stats to tensorboard
logger (logging.Logger): Logger used in this function.
Returns:
agent: Trained agent.
eval_stats_history: List of evaluation episode stats dict.
"""
logger = logger or logging.getLogger(__name__)
for hook in evaluation_hooks:
if not hook.support_train_agent_batch:
raise ValueError(
"{} does not support train_agent_batch_with_evaluation().".format(hook)
)
os.makedirs(outdir, exist_ok=True)
if eval_env is None:
eval_env = env
if eval_max_episode_len is None:
eval_max_episode_len = max_episode_len
evaluator = Evaluator(
agent=agent,
n_steps=eval_n_steps,
n_episodes=eval_n_episodes,
eval_interval=eval_interval,
outdir=outdir,
max_episode_len=eval_max_episode_len,
env=eval_env,
step_offset=step_offset,
evaluation_hooks=evaluation_hooks,
save_best_so_far_agent=save_best_so_far_agent,
use_tensorboard=use_tensorboard,
logger=logger,
)
eval_stats_history = train_agent_batch(
agent,
env,
steps,
outdir,
checkpoint_freq=checkpoint_freq,
max_episode_len=max_episode_len,
step_offset=step_offset,
evaluator=evaluator,
successful_score=successful_score,
return_window_size=return_window_size,
log_interval=log_interval,
step_hooks=step_hooks,
logger=logger,
)
return agent, eval_stats_history
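# Illustrative sketch, not part of pfrl: a minimal call shape for the function above.
# `make_batch_env()` and `agent` are placeholders for a pfrl VectorEnv factory and a
# pfrl agent; neither is defined in this module.
#
#     agent, eval_history = train_agent_batch_with_evaluation(
#         agent=agent,
#         env=make_batch_env(test=False),
#         eval_env=make_batch_env(test=True),
#         steps=100_000,
#         eval_n_steps=None,
#         eval_n_episodes=10,
#         eval_interval=10_000,
#         outdir="results",
#         log_interval=1_000,
#     )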
|
9299973a0cdcffec161efe881bad747b5d1005c9
|
3ec38f732b21b0a00e822dac730bdc1748902144
|
/test/unit/oscar/test_controller.py
|
7324a762ca918219f9a24af60246b40e120ff242
|
[
"Apache-2.0"
] |
permissive
|
grycap/scar
|
e5594c1eb79a0730409c97d48bc511757a05dcbd
|
e6c8b06a43b310d2c1e58d7826239e259dd826d7
|
refs/heads/master
| 2023-08-22T00:39:28.004454
| 2023-05-22T11:01:10
| 2023-05-22T11:01:10
| 91,441,209
| 613
| 59
|
Apache-2.0
| 2022-11-29T06:36:46
| 2017-05-16T09:35:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,563
|
py
|
test_controller.py
|
#! /usr/bin/python
# Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import tempfile
from mock import MagicMock
from mock import patch
sys.path.append("..")
sys.path.append(".")
sys.path.append("../..")
from scar.providers.oscar.controller import OSCAR
class TestOSCARController(unittest.TestCase):
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
@patch('scar.providers.oscar.controller.OSCARClient')
@patch('scar.providers.aws.controller.FileUtils.load_tmp_config_file')
def test_init(self, load_tmp_config_file, oscar_client):
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(b'Hello world!')
tmpfile.close()
load_tmp_config_file.return_value = {"functions": {"oscar": [{"my_oscar": {"name": "oname",
"script": tmpfile.name}}]}}
ocli = MagicMock(['create_service'])
oscar_client.return_value = ocli
OSCAR('init')
os.unlink(tmpfile.name)
res = {'name': 'oname', 'script': 'Hello world!',
'cluster_id': 'my_oscar', 'storage_providers': {}}
self.assertEqual(ocli.create_service.call_args_list[0][1], res)
@patch('scar.providers.oscar.controller.OSCARClient')
@patch('scar.providers.aws.controller.FileUtils.load_tmp_config_file')
def test_rm(self, load_tmp_config_file, oscar_client):
load_tmp_config_file.return_value = {"functions": {"oscar": [{"my_oscar": {"name": "oname",
"script": "some.sh"}}]}}
ocli = MagicMock(['delete_service'])
oscar_client.return_value = ocli
OSCAR('rm')
self.assertEqual(ocli.delete_service.call_args_list[0][0][0], 'oname')
@patch('scar.providers.oscar.controller.OSCARClient')
@patch('scar.providers.aws.controller.FileUtils.load_tmp_config_file')
def test_ls(self, load_tmp_config_file, oscar_client):
load_tmp_config_file.return_value = {"functions": {"oscar": [{"my_oscar": {"name": "oname",
"script": "some.sh",
"endpoint": "http://some.es",
"auth_user": "user",
"auth_password": "pass",
"ssl_verify": False}}]}}
ocli = MagicMock(['list_services'])
ocli.list_services.return_value = [{'name': 'fname', 'memory': '256Mi',
'cpu': '1.0', 'image': 'some/image:tag'}]
oscar_client.return_value = ocli
OSCAR('ls')
self.assertEqual(ocli.list_services.call_count, 1)
|
58082603ddfde3181876ef291bf603607548cb9a
|
0a16415a4e7657570b95e96c16a9bffbeeddd279
|
/kapture/core/Observations.py
|
f9cd002aa2b3f2774dcd7c7d83377285d12d2000
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
naver/kapture
|
13fda56217751ac12c77f0be658815ea6a598fa8
|
392677fb974aaa7c9c7c65d05c09abdb8a681124
|
refs/heads/main
| 2023-08-24T16:25:20.195565
| 2023-03-29T15:58:07
| 2023-03-29T15:58:07
| 275,164,995
| 386
| 64
|
BSD-3-Clause
| 2023-09-04T15:48:49
| 2020-06-26T13:40:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,470
|
py
|
Observations.py
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
from typing import Dict, List, Tuple, Union
class Observations(Dict[int, Dict[str, List[Tuple[str, int]]]]):
"""
Observations. This can be used like this:
- observations[point3d_idx][keypoints_type] = list( observation )
- observation = (image_path, keypoint_idx)
"""
def add(self, point3d_idx: int, keypoints_type: str, image_filename: str, keypoint_idx: int):
"""
Adds a 2-D observation (image, keypoint) of a 3D point.
:param point3d_idx: index of the 3D point to add an observation of.
:param keypoints_type: type of keypoints, name of the keypoints subfolder
        :param image_filename: name of the image where the 3D point is observed
:param keypoint_idx: index of the keypoints in the image that correspond to the 3D point.
:return:
"""
# enforce type checking
if not isinstance(point3d_idx, int):
raise TypeError('invalid type for point3d_idx')
if not isinstance(keypoints_type, str):
raise TypeError('invalid type for keypoints_type')
if not isinstance(image_filename, str):
raise TypeError('invalid type for image_filename')
if not isinstance(keypoint_idx, int):
raise TypeError('invalid type for keypoint_idx')
self.setdefault(point3d_idx, {}).setdefault(keypoints_type, []).append((image_filename, keypoint_idx))
def __getitem__(self, key: Union[int, Tuple[int, str]]) -> Union[Dict[str, List[Tuple[str, int]]],
List[Tuple[str, int]]]:
if isinstance(key, tuple):
# key is a pair of (point3d_idx, keypoints_type)
point3d_idx = key[0]
keypoints_type = key[1]
if not isinstance(point3d_idx, int):
raise TypeError('invalid point3d_idx')
if not isinstance(keypoints_type, str):
raise TypeError('invalid keypoints_type')
return super(Observations, self).__getitem__(point3d_idx)[keypoints_type]
elif isinstance(key, int):
# key is a point3d_idx
return super(Observations, self).__getitem__(key)
else:
raise TypeError('key must be Union[int, Tuple[int, str]]')
def key_pairs(self) -> List[Tuple[int, str]]:
"""
Returns the list of (point3d_idx, keypoints_type) contained in observations.
        Those pairs can be used to access a list of observations.
:return: list of (point3d_idx, keypoints_type)
"""
return [
(point3d_idx, keypoints_type)
for point3d_idx, per_feature_observations in self.items()
for keypoints_type in per_feature_observations.keys()
]
def observations_number(self) -> int:
"""
Get the number of observations
"""
nb = 0
for per_feature_observations in self.values():
for observations_list in per_feature_observations.values():
nb += len(observations_list)
return nb
def __contains__(self, key: Union[int, Tuple[int, str]]):
if isinstance(key, tuple):
# key is a pair of (point3d_idx, keypoints_type)
point3d_idx = key[0]
keypoints_type = key[1]
if not isinstance(point3d_idx, int):
raise TypeError('invalid point3d_idx')
if not isinstance(keypoints_type, str):
raise TypeError('invalid keypoints_type')
return super(Observations, self).__contains__(point3d_idx) and keypoints_type in self[point3d_idx]
elif isinstance(key, int):
return super(Observations, self).__contains__(key)
else:
raise TypeError('key must be Union[int, Tuple[int, str]]')
def __repr__(self) -> str:
representation = ''
# [point3d_idx, keypoints_type]: (image_path, keypoint_idx) (image_path, keypoint_idx)...
for point3d_idx, keypoints_type in sorted(self.key_pairs(), key=lambda x: x[0]):
representation += f'[{point3d_idx:05}, {keypoints_type}]: '
assert point3d_idx is not None
for image_path, keypoint_idx in self.get(point3d_idx)[keypoints_type]:
representation += f'\t({image_path}, {keypoint_idx})'
representation += '\n'
return representation
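# Illustrative sketch, not part of kapture: populate and query an Observations
# container following the conventions documented in the class docstring (paths and
# indices below are example values only).
if __name__ == '__main__':
    obs = Observations()
    obs.add(0, 'SIFT', 'records_data/cam0/0000.jpg', 12)
    obs.add(0, 'SIFT', 'records_data/cam1/0000.jpg', 7)
    assert (0, 'SIFT') in obs
    assert obs[0, 'SIFT'] == [('records_data/cam0/0000.jpg', 12),
                              ('records_data/cam1/0000.jpg', 7)]
    assert obs.observations_number() == 2
    print(obs)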
|
8be55f05e28060e88811edd14ec7b5b4ec4c660e
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/sdb/redis_sdb.py
|
1419e3813c8f4026ef6224bf61f7f644bf0019a7
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
redis_sdb.py
|
"""
Redis SDB module
================
.. versionadded:: 2019.2.0
This module allows access to Redis using an ``sdb://`` URI.
Like all SDB modules, the Redis module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. For example:
.. code-block:: yaml
sdb_redis:
driver: redis
host: 127.0.0.1
port: 6379
password: pass
db: 1
The ``driver`` refers to the Redis module, all other options are optional.
For option details see: https://redis-py.readthedocs.io/en/latest/.
"""
try:
import redis
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
__func_alias__ = {"set_": "set"}
__virtualname__ = "redis"
def __virtual__():
"""
Module virtual name.
"""
if not HAS_REDIS:
return (False, "Please install python-redis to use this SDB module.")
return __virtualname__
def set_(key, value, profile=None):
"""
Set a value into the Redis SDB.
"""
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop("driver")
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.set(key, value)
def get(key, profile=None):
"""
Get a value from the Redis SDB.
"""
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop("driver")
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.get(key)
def delete(key, profile=None):
"""
Delete a key from the Redis SDB.
"""
if not profile:
return False
redis_kwargs = profile.copy()
redis_kwargs.pop("driver")
redis_conn = redis.StrictRedis(**redis_kwargs)
return redis_conn.delete(key)
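# Illustrative sketch, not part of the Salt module: each function above copies the
# profile dict and pops the 'driver' key itself, so direct use only needs the profile
# from the minion/master config. Requires a reachable Redis server; the values below
# are placeholders.
if __name__ == "__main__":
    profile = {"driver": "redis", "host": "127.0.0.1", "port": 6379, "db": 1}
    set_("my_key", "my_value", profile=profile)
    print(get("my_key", profile=profile))
    delete("my_key", profile=profile)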
|
1b97162336d9a7ed7a8264f7889942c8b62ecb81
|
6be59c81f3f6a17c14b812be0de3346a82eb33dd
|
/data_science/pandas_demo/groupby_demo.py
|
d271010c6890d7e79b5aeef9613513d7639a291f
|
[] |
no_license
|
chunhuizhang/bilibili_vlogs
|
6851fdcd43f08fcf7195e345b0bc85d99c0b9128
|
0efd921b24f2af43f5972ea6909deb2fc069d305
|
refs/heads/master
| 2023-08-17T15:47:04.299072
| 2023-08-14T13:46:31
| 2023-08-14T13:46:31
| 220,612,967
| 170
| 70
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
groupby_demo.py
|
import pandas as pd
import numpy as np
df = pd.DataFrame([('bird', 'Falconiformes', 389.0),
('bird', 'Psittaciformes', 24.0),
('mammal', 'Carnivora', 80.2),
('mammal', 'Primates', np.nan),
('mammal', 'Carnivora', 58)],
index=['falcon', 'parrot', 'lion', 'monkey', 'leopard'],
columns=('class', 'order', 'max_speed'))
print(df)
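# Illustrative addition, not in the original demo: the group-by the file is named
# after, e.g. mean max_speed per class and per (class, order).
print(df.groupby('class')['max_speed'].mean())
print(df.groupby(['class', 'order'])['max_speed'].agg(['mean', 'count']))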
|
4e51dcf6a4dce59c7130c17f73fc72ad79d0dae5
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py
|
b54519fe27339b471e77a6a81d5f7773677f16da
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,741
|
py
|
kubernetes_helper_functions.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import secrets
import string
from typing import TYPE_CHECKING
import pendulum
from slugify import slugify
from airflow.compat.functools import cache
from airflow.configuration import conf
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
log = logging.getLogger(__name__)
alphanum_lower = string.ascii_lowercase + string.digits
def rand_str(num):
"""Generate random lowercase alphanumeric string of length num.
:meta private:
"""
return "".join(secrets.choice(alphanum_lower) for _ in range(num))
def add_pod_suffix(*, pod_name: str, rand_len: int = 8, max_len: int = 80) -> str:
"""Add random string to pod name while staying under max length.
:param pod_name: name of the pod
:param rand_len: length of the random string to append
:param max_len: maximum length of the pod name
:meta private:
"""
suffix = "-" + rand_str(rand_len)
return pod_name[: max_len - len(suffix)].strip("-.") + suffix
def create_pod_id(
dag_id: str | None = None,
task_id: str | None = None,
*,
max_length: int = 80,
unique: bool = True,
) -> str:
"""
Generates unique pod ID given a dag_id and / or task_id.
The default of 80 for max length is somewhat arbitrary, mainly a balance between
content and not overwhelming terminal windows of reasonable width. The true
upper limit is 253, and this is enforced in construct_pod.
:param dag_id: DAG ID
:param task_id: Task ID
:param max_length: max number of characters
:param unique: whether a random string suffix should be added
:return: A valid identifier for a kubernetes pod name
"""
if not (dag_id or task_id):
raise ValueError("Must supply either dag_id or task_id.")
name = ""
if dag_id:
name += dag_id
if task_id:
if name:
name += "-"
name += task_id
base_name = slugify(name, lowercase=True)[:max_length].strip(".-")
if unique:
return add_pod_suffix(pod_name=base_name, rand_len=8, max_len=max_length)
else:
return base_name
def annotations_to_key(annotations: dict[str, str]) -> TaskInstanceKey:
"""Build a TaskInstanceKey based on pod annotations."""
log.debug("Creating task key for annotations %s", annotations)
dag_id = annotations["dag_id"]
task_id = annotations["task_id"]
try_number = int(annotations["try_number"])
annotation_run_id = annotations.get("run_id")
map_index = int(annotations.get("map_index", -1))
# Compat: Look up the run_id from the TI table!
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
from airflow.settings import Session
if not annotation_run_id and "execution_date" in annotations:
execution_date = pendulum.parse(annotations["execution_date"])
# Do _not_ use create-session, we don't want to expunge
session = Session()
task_instance_run_id = (
session.query(TaskInstance.run_id)
.join(TaskInstance.dag_run)
.filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id == task_id,
DagRun.execution_date == execution_date,
)
.scalar()
)
else:
task_instance_run_id = annotation_run_id
return TaskInstanceKey(
dag_id=dag_id,
task_id=task_id,
run_id=task_instance_run_id,
try_number=try_number,
map_index=map_index,
)
@cache
def get_logs_task_metadata() -> bool:
return conf.getboolean("kubernetes_executor", "logs_task_metadata")
def annotations_for_logging_task_metadata(annotation_set):
if get_logs_task_metadata():
annotations_for_logging = annotation_set
else:
annotations_for_logging = "<omitted>"
return annotations_for_logging
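# Illustrative sketch, not part of the provider: how the helpers above compose. With
# unique=True, create_pod_id() slugifies "<dag_id>-<task_id>" and appends a random
# 8-character suffix while staying under max_length (values below are examples only).
if __name__ == "__main__":
    print(create_pod_id(dag_id="my_dag", task_id="my.task", max_length=30))
    # e.g. "my-dag-my-task-a1b2c3d4" (suffix differs on every call)
    print(add_pod_suffix(pod_name="a" * 100, rand_len=8, max_len=63))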
|
ad09f1178489249f0bbe1ac89b8c04992d14a89e
|
11d0cc8470722b737ac1d83610f227ffb98ea71a
|
/convert/ahf2csv.py
|
806bfe4ea9ced908b9a5b9164efc7b63a5b04e65
|
[] |
no_license
|
EdoardoCarlesi/PyRCODIO
|
d86e5298877f08b597f92a07aac0c9b634dbfa39
|
d3f84efb7aeec0032ef6bde839fe9440ee82b392
|
refs/heads/master
| 2021-04-03T04:58:26.219884
| 2021-03-21T12:22:19
| 2021-03-21T12:22:19
| 125,036,704
| 148
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
ahf2csv.py
|
'''
Python Routines for COsmology and Data I/O (PyRCODIO) v0.2
Edoardo Carlesi 2020
ecarlesi83@gmail.com
ahf2csv.py: convert (and compress) AHF halo catalogs to csv files
'''
import pandas as pd
import sys
sys.path.insert(1, '/home/edoardo/CLUES/PyRCODIO/')
import read_files as rf
this_ahf = sys.argv[1]
mpi = sys.argv[2]
out_file = this_ahf + '.csv'
halo_df = rf.read_ahf_halo(this_ahf, file_mpi=mpi)
halo_df.to_csv(out_file)
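# Illustrative usage sketch, not part of the original script. Both arguments are
# positional: the AHF catalog path and the MPI flag forwarded to read_ahf_halo()
# (its accepted values depend on read_files.read_ahf_halo).
#
#     python ahf2csv.py /path/to/catalog.AHF_halos True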
|
23f8b0d0848b9d4a6fb37bd5ce49e98ce656ff37
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/test/connector/exchange/coinbase_pro/test_coinbase_pro_user_stream_tracker.py
|
38a0ad4808843b1dbbefe7b2c59b1a95e2bf24ad
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 8,164
|
py
|
test_coinbase_pro_user_stream_tracker.py
|
import asyncio
import contextlib
import logging
import time
import unittest
from decimal import Decimal
from typing import Optional
import conf
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_exchange import CoinbaseProAuth, CoinbaseProExchange
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_order_book_message import CoinbaseProOrderBookMessage
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_user_stream_tracker import CoinbaseProUserStreamTracker
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.data_type.common import OrderType
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
class CoinbaseProUserStreamTrackerUnitTest(unittest.TestCase):
user_stream_tracker: Optional[CoinbaseProUserStreamTracker] = None
market: CoinbaseProExchange
stack: contextlib.ExitStack
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.coinbase_pro_auth = CoinbaseProAuth(conf.coinbase_pro_api_key,
conf.coinbase_pro_secret_key,
conf.coinbase_pro_passphrase)
cls.trading_pairs = ["ETH-USDC"]
cls.user_stream_tracker: CoinbaseProUserStreamTracker = CoinbaseProUserStreamTracker(
coinbase_pro_auth=cls.coinbase_pro_auth, trading_pairs=cls.trading_pairs)
cls.user_stream_tracker_task: asyncio.Task = safe_ensure_future(cls.user_stream_tracker.start())
cls.clock: Clock = Clock(ClockMode.REALTIME)
cls.market: CoinbaseProExchange = CoinbaseProExchange(
conf.coinbase_pro_api_key,
conf.coinbase_pro_secret_key,
conf.coinbase_pro_passphrase,
trading_pairs=cls.trading_pairs
)
print("Initializing Coinbase Pro market... this will take about a minute.")
cls.clock.add_iterator(cls.market)
cls.stack = contextlib.ExitStack()
cls._clock = cls.stack.enter_context(cls.clock)
cls.ev_loop.run_until_complete(cls.wait_til_ready())
print("Ready.")
@classmethod
async def wait_til_ready(cls):
while True:
now = time.time()
next_iteration = now // 1.0 + 1
if cls.market.ready:
break
else:
await cls._clock.run_til(next_iteration)
await asyncio.sleep(1.0)
async def run_parallel_async(self, *tasks):
future: asyncio.Future = safe_ensure_future(safe_gather(*tasks))
while not future.done():
now = time.time()
next_iteration = now // 1.0 + 1
await self.clock.run_til(next_iteration)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def test_limit_order_cancelled(self):
"""
This test should be run after the developer has implemented the limit buy and cancel
in the corresponding market class
"""
self.assertGreater(self.market.get_balance("ETH"), Decimal("0.1"))
trading_pair = self.trading_pairs[0]
amount: Decimal = Decimal("0.02")
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
current_bid_price: Decimal = self.market.get_price(trading_pair, True)
bid_price: Decimal = current_bid_price * Decimal("0.8")
quantize_bid_price: Decimal = self.market.quantize_order_price(trading_pair, bid_price)
client_order_id = self.market.buy(trading_pair, quantized_amount, OrderType.LIMIT, quantize_bid_price)
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
[open_message] = self.run_parallel(self.user_stream_tracker.user_stream.get())
# print(open_message)
self.assertTrue(isinstance(open_message, CoinbaseProOrderBookMessage))
self.assertEqual(open_message.trading_pair, trading_pair)
self.assertEqual(open_message.content["type"], "open")
self.assertEqual(open_message.content["side"], "buy")
self.assertEqual(open_message.content["product_id"], trading_pair)
self.assertEqual(Decimal(open_message.content["price"]), quantize_bid_price)
self.assertEqual(Decimal(open_message.content["remaining_size"]), quantized_amount)
self.run_parallel(asyncio.sleep(5.0))
self.market.cancel(trading_pair, client_order_id)
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
[done_message] = self.run_parallel(self.user_stream_tracker.user_stream.get())
# print(done_message)
self.assertEqual(done_message.trading_pair, trading_pair)
self.assertEqual(done_message.content["type"], "done")
self.assertEqual(done_message.content["side"], "buy")
self.assertEqual(done_message.content["product_id"], trading_pair)
self.assertEqual(Decimal(done_message.content["price"]), quantize_bid_price)
self.assertEqual(Decimal(done_message.content["remaining_size"]), quantized_amount)
self.assertEqual(done_message.content["reason"], "canceled")
@unittest.skip
def test_limit_order_filled(self):
"""
This test should be run after the developer has implemented the limit buy in the corresponding market class
"""
self.assertGreater(self.market.get_balance("ETH"), Decimal("0.1"))
trading_pair = self.trading_pairs[0]
amount: Decimal = Decimal("0.02")
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
current_bid_price: Decimal = self.market.get_price(trading_pair, True)
bid_price: Decimal = current_bid_price * Decimal("1.05")
quantize_bid_price: Decimal = self.market.quantize_order_price(trading_pair, bid_price)
self.market.buy(trading_pair, quantized_amount, OrderType.LIMIT, quantize_bid_price)
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
[message_1, message_2] = self.run_parallel(self.user_stream_tracker.user_stream.get(),
self.user_stream_tracker.user_stream.get())
self.assertTrue(isinstance(message_1, CoinbaseProOrderBookMessage))
self.assertTrue(isinstance(message_2, CoinbaseProOrderBookMessage))
if message_1.content["type"] == "done":
done_message = message_1
match_message = message_2
else:
done_message = message_2
match_message = message_1
# print(done_message)
self.assertEqual(done_message.trading_pair, trading_pair)
self.assertEqual(done_message.content["type"], "done")
self.assertEqual(done_message.content["side"], "buy")
self.assertEqual(done_message.content["product_id"], trading_pair)
self.assertEqual(Decimal(done_message.content["price"]), quantize_bid_price)
self.assertEqual(Decimal(done_message.content["remaining_size"]), Decimal(0.0))
self.assertEqual(done_message.content["reason"], "filled")
# print(match_message)
self.assertEqual(match_message.trading_pair, trading_pair)
self.assertEqual(match_message.content["type"], "match")
self.assertEqual(match_message.content["side"], "sell")
self.assertEqual(match_message.content["product_id"], trading_pair)
self.assertLessEqual(Decimal(match_message.content["price"]), quantize_bid_price)
self.assertEqual(Decimal(match_message.content["size"]), quantized_amount)
@unittest.skip
def test_user_stream_manually(self):
"""
This test should be run before market functions like buy and sell are implemented.
Developer needs to manually trigger those actions in order for the messages to show up in the user stream.
"""
self.ev_loop.run_until_complete(asyncio.sleep(30.0))
print(self.user_stream_tracker.user_stream)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
|
54f4101301999c6518e86ac766c527a2ea42b7d4
|
d8810093406a2ba401ac6ea300ed414bfab6b6a0
|
/tests/test_dataset.py
|
2fee9e315c115ec37a3f80b24bb558cd3700329d
|
[
"Apache-2.0"
] |
permissive
|
mittagessen/kraken
|
11b8eeaff5dcfaa62a96b0af73ebe65bc2d9bef2
|
3e966a3e5c881394b882da95fa5941c4305aec43
|
refs/heads/main
| 2023-08-29T05:52:06.301403
| 2023-08-17T11:25:56
| 2023-08-17T11:25:56
| 35,872,353
| 565
| 140
|
Apache-2.0
| 2023-09-08T10:28:33
| 2015-05-19T09:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 11,240
|
py
|
test_dataset.py
|
# -*- coding: utf-8 -*-
import unittest
from pathlib import Path
from pytest import raises
from PIL import Image
from kraken.lib.dataset import ImageInputTransforms, BaselineSet
from kraken.lib.util import is_bitonal
from kraken.lib.exceptions import KrakenInputException
thisfile = Path(__file__).resolve().parent
resources = thisfile / 'resources'
def check_output(self, config, im, output_tensor):
if config['height'] != 0:
self.assertEqual(config['height'], output_tensor.shape[1])
if config['width'] != 0:
self.assertEqual(config['width'], output_tensor.shape[2])
if config['force_binarization'] or is_bitonal(im):
self.assertEqual(len(output_tensor.int().unique()), 2)
if config['channels'] == 3:
self.assertEqual(output_tensor.shape[0], 3)
class TestBaselineSet(unittest.TestCase):
"""
Tests for the BaselineSet segmentation dataset class
"""
def setUp(self):
self.doc = resources / '170025120000003,0074.xml'
self.transforms = ImageInputTransforms(batch=1,
height=200,
width=100,
channels=1,
pad=0)
def test_baselineset_simple_xml(self):
"""
Tests simple BaselineSet instantiation
"""
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 10)
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_valid_baselines(self):
"""
Test baseline whitelisting in BaselineSet
"""
# filter out $pac and $pag baseline classes
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_baselines=['$par', '$tip'],
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 8)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip', '$par')))
self.assertNotIn('$pac', ds.class_mapping['baselines'])
self.assertNotIn('$pag', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_valid_regions(self):
"""
Test region whitelisting in BaselineSet
"""
# filter out $tip and $par regions
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_regions=['$pag', '$pac'],
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 8)
self.assertEqual(set(ds.class_mapping['regions'].keys()), set(('$pag', '$pac')))
self.assertNotIn('$par', ds.class_mapping['regions'])
self.assertNotIn('$tip', ds.class_mapping['regions'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_simple_merge_baselines(self):
"""
Test baseline merging in BaselineSet
"""
# merge $par into $tip
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
merge_baselines={'$par': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 9)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip', '$pag', '$pac')))
self.assertEqual(len(ds.targets[0]['baselines']['$tip']), 18)
self.assertNotIn('$par', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_merge_after_valid_baselines(self):
"""
Test that filtering with valid_baselines occurs before merging.
"""
# merge $par and $pac into $tip but discard $par before
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_baselines=('$tip', '$pac'),
merge_baselines={'$par': '$tip', '$pac': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 7)
self.assertEqual(set(ds.class_mapping['baselines'].keys()), set(('$tip',)))
self.assertEqual(len(ds.targets[0]['baselines']['$tip']), 26)
self.assertNotIn('$par', ds.class_mapping['baselines'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
def test_baselineset_merge_after_valid_regions(self):
"""
Test that filtering with valid_regions occurs before merging.
"""
# merge $par and $pac into $tip but discard $par before
ds = BaselineSet(imgs=[self.doc, self.doc],
im_transforms=self.transforms,
valid_regions=('$tip', '$pac'),
merge_regions={'$par': '$tip', '$pac': '$tip'},
mode='xml')
sample = ds[0]
self.assertEqual(len(ds), 2)
self.assertEqual(ds.num_classes, 7)
self.assertEqual(set(ds.class_mapping['regions'].keys()), set(('$tip',)))
self.assertEqual(len(ds.targets[0]['regions']['$tip']), 2)
self.assertNotIn('$par', ds.class_mapping['regions'])
self.assertEqual(sample['image'].shape, (1, 200, 100))
self.assertEqual(sample['target'].shape, (ds.num_classes, 200, 100))
class TestInputTransforms(unittest.TestCase):
"""
Tests for ImageInputTransforms class
"""
def setUp(self):
self.im = Image.open(resources / '000236.png')
self.simple_inst = {'batch': 1,
'height': 48,
'width': 0,
'channels': 1,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.simple_inst_norm = {'batch': 1,
'height': 48,
'width': 0,
'channels': 1,
'pad': (16, 0),
'valid_norm': True,
'force_binarization': False}
self.simple_inst_rgb = {'batch': 1,
'height': 48,
'width': 0,
'channels': 3,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.simple_inst_norm_rgb = {'batch': 1,
'height': 48,
'width': 0,
'channels': 3,
'pad': (16, 0),
'valid_norm': True,
'force_binarization': False}
self.channel_height_inst = {'batch': 1,
'height': 1,
'width': 0,
'channels': 72,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
self.invalid_channels = {'batch': 1,
'height': 48,
'width': 0,
'channels': 4,
'pad': (16, 0),
'valid_norm': False,
'force_binarization': False}
def test_imageinputtransforms_simple(self):
"""
Simple ImageInputTransforms instantiation.
"""
tf = ImageInputTransforms(**self.simple_inst)
for k, v in self.simple_inst.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst, self.im, tf(self.im))
def test_imageinputtransforms_simple_rgb(self):
"""
Simple RGB ImageInputTransforms instantiation.
"""
tf = ImageInputTransforms(**self.simple_inst_rgb)
for k, v in self.simple_inst_rgb.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst_rgb, self.im, tf(self.im))
def test_imageinputtransforms_norm_rgb(self):
"""
RGB ImageInputTransforms instantiation with centerline normalization
valid (but not enabled).
"""
tf = ImageInputTransforms(**self.simple_inst_norm_rgb)
for k, v in self.simple_inst_norm_rgb.items():
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
check_output(self, self.simple_inst_norm_rgb, self.im, tf(self.im))
def test_imageinputtransforms_simple_norm(self):
"""
ImageInputTransforms instantiation with centerline normalization valid.
"""
tf = ImageInputTransforms(**self.simple_inst_norm)
for k, v in self.simple_inst_norm.items():
self.assertEqual(getattr(tf, k), v)
self.assertTrue(tf.centerline_norm)
check_output(self, self.simple_inst_norm, self.im, tf(self.im))
def test_imageinputtransforms_channel_height(self):
"""
ImageInputTransforms with height in channel dimension
"""
tf = ImageInputTransforms(**self.channel_height_inst)
for k, v in self.channel_height_inst.items():
if k == 'channels':
self.assertEqual(1, tf.channels)
elif k == 'height':
self.assertEqual(self.channel_height_inst['channels'], tf.height)
else:
self.assertEqual(getattr(tf, k), v)
self.assertFalse(tf.centerline_norm)
self.channel_height_inst['height'] = self.channel_height_inst['channels']
self.channel_height_inst['channels'] = 1
check_output(self, self.channel_height_inst, self.im, tf(self.im))
def test_imageinputtransforms_invalid_channels(self):
"""
ImageInputTransforms instantiation with invalid number of channels
"""
with raises(KrakenInputException):
tf = ImageInputTransforms(**self.invalid_channels)
|
585dc5b9219eff30a900bdaf1d1ca7e604af4ab2
|
d75359fde22b08a4109b30bb39c9db27961fa417
|
/setup.py
|
5d6424034b3d9f2d743ba7becc12154686528c53
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
authlib/loginpass
|
58f0881b4e5975c305e633337d1b86657bea907b
|
635823a78a2a92cf8630f9935aebb9afcccb8656
|
refs/heads/master
| 2022-06-08T13:08:09.271879
| 2020-12-08T06:04:39
| 2020-12-08T06:04:39
| 128,506,236
| 280
| 95
|
BSD-3-Clause
| 2022-05-13T19:30:54
| 2018-04-07T07:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
setup.py
|
#!/usr/bin/env python
import re
from setuptools import setup
_version_re = re.compile(r"version\s=\s'(.*)'")
with open('loginpass/_consts.py', 'r') as f:
version = _version_re.search(f.read()).group(1)
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='loginpass',
version=version,
description='Social connections powered by Authlib for Flask and Django',
long_description=long_description,
url='https://authlib.org/',
zip_safe=False,
license='BSD-3-Clause',
packages=['loginpass'],
install_requires=['requests', 'Authlib>=0.14.3'],
include_package_data=True,
tests_require=['nose', 'mock'],
test_suite='nose.collector',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Flask',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
fbdf8aa1895ed37bba792e08c00df4e4d6219f2a
|
020d68f6a3dc1d448551c4bd803bc9b9d7d7c34c
|
/easy_maps/migrations/0001_initial.py
|
1c2e19b052524c5c5be1f86b63c0500f6fab0776
|
[
"MIT"
] |
permissive
|
bashu/django-easy-maps
|
be8c8cadff034934eef56c77175d3d52afec6e9e
|
99930f091ee52380afa8814bd99687ef9eddc97d
|
refs/heads/develop
| 2021-12-10T09:22:37.935387
| 2021-11-29T08:14:47
| 2021-12-01T03:49:42
| 3,122,074
| 123
| 59
|
MIT
| 2021-09-25T12:10:16
| 2012-01-07T00:00:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
0001_initial.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="Address",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("address", models.CharField(unique=True, max_length=255, verbose_name="address")),
(
"computed_address",
models.CharField(max_length=255, null=True, verbose_name="computed address", blank=True),
),
("latitude", models.FloatField(null=True, verbose_name="latitude", blank=True)),
("longitude", models.FloatField(null=True, verbose_name="longitude", blank=True)),
("geocode_error", models.BooleanField(default=False, verbose_name="geocode error")),
],
options={
"verbose_name": "EasyMaps Address",
"verbose_name_plural": "Address Geocoding Cache",
},
),
]
|
836e0f9c413e717ae4a328a2ca2a73ffc0f24997
|
59f64b5cf799e31c97b11828dba4787afb8f3f17
|
/batch/batch/front_end/validate.py
|
d18dd839f19eb320b87b891fa1a8fcd872f24a15
|
[
"MIT"
] |
permissive
|
hail-is/hail
|
2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1
|
07a483ae0f46c66f3ed6fd265b48f48c06298f98
|
refs/heads/main
| 2023-09-01T15:03:01.450365
| 2023-09-01T02:46:35
| 2023-09-01T02:46:35
| 45,069,467
| 913
| 262
|
MIT
| 2023-09-14T21:53:32
| 2015-10-27T20:55:42
|
Python
|
UTF-8
|
Python
| false
| false
| 7,160
|
py
|
validate.py
|
from hailtop.batch_client.parse import (
CPU_REGEX,
CPU_REGEXPAT,
MEMORY_REGEX,
MEMORY_REGEXPAT,
STORAGE_REGEX,
STORAGE_REGEXPAT,
)
from hailtop.utils.validate import (
ValidationError,
anyof,
bool_type,
dictof,
int_type,
keyed,
listof,
non_empty_str_type,
nullable,
numeric,
oneof,
regex,
required,
str_type,
switch,
)
from ..globals import memory_types
k8s_str = regex(r'[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9](?:[-a-z0-9]*[a-z0-9])?)*', maxlen=253)
# FIXME validate image
# https://github.com/docker/distribution/blob/master/reference/regexp.go#L68
image_str = str_type
# DEPRECATED:
# command -> process/command
# image -> process/image
# mount_docker_socket -> process/mount_docker_socket
# pvc_size -> resources/storage
# gcsfuse -> cloudfuse
job_validator = keyed(
{
'always_copy_output': bool_type,
'always_run': bool_type,
'attributes': dictof(str_type),
'env': listof(keyed({'name': str_type, 'value': str_type})),
'cloudfuse': listof(
keyed(
{
required('bucket'): non_empty_str_type,
required('mount_path'): non_empty_str_type,
required('read_only'): bool_type,
}
)
),
'input_files': listof(keyed({required('from'): str_type, required('to'): str_type})),
required('job_id'): int_type,
'mount_tokens': bool_type,
'network': oneof('public', 'private'),
'unconfined': bool_type,
'output_files': listof(keyed({required('from'): str_type, required('to'): str_type})),
'parent_ids': listof(int_type),
'absolute_parent_ids': listof(int_type),
'in_update_parent_ids': listof(int_type),
'port': int_type,
required('process'): switch(
'type',
{
'docker': {
required('command'): listof(str_type),
required('image'): image_str,
'mount_docker_socket': bool_type, # DEPRECATED
},
'jvm': {
required('jar_spec'): keyed(
{required('type'): oneof('git_revision', 'jar_url'), required('value'): str_type}
),
required('command'): listof(str_type),
'profile': bool_type,
},
},
),
'regions': listof(str_type),
'requester_pays_project': str_type,
'resources': keyed(
{
'memory': anyof(regex(MEMORY_REGEXPAT, MEMORY_REGEX), oneof(*memory_types)),
'cpu': regex(CPU_REGEXPAT, CPU_REGEX),
'storage': regex(STORAGE_REGEXPAT, STORAGE_REGEX),
'machine_type': str_type,
'preemptible': bool_type,
}
),
'secrets': listof(
keyed({required('namespace'): k8s_str, required('name'): k8s_str, required('mount_path'): str_type})
),
'service_account': keyed({required('namespace'): k8s_str, required('name'): k8s_str}),
'timeout': numeric(**{"x > 0": lambda x: x > 0}),
'user_code': str_type,
}
)


batch_validator = keyed(
{
'attributes': nullable(dictof(str_type)),
required('billing_project'): str_type,
'callback': nullable(str_type),
required('n_jobs'): int_type,
required('token'): str_type,
'cancel_after_n_failures': nullable(numeric(**{"x > 0": lambda x: isinstance(x, int) and x > 0})),
}
)


batch_update_validator = keyed(
{
required('token'): str_type,
required('n_jobs'): numeric(**{"x > 0": lambda x: isinstance(x, int) and x > 0}),
}
)


def validate_and_clean_jobs(jobs):
if not isinstance(jobs, list):
raise ValidationError('jobs is not list')
for i, job in enumerate(jobs):
handle_deprecated_job_keys(i, job)
job_validator.validate(f"jobs[{i}]", job)
handle_job_backwards_compatibility(job)


def handle_deprecated_job_keys(i, job):
if 'pvc_size' in job:
if 'resources' in job and 'storage' in job['resources']:
raise ValidationError(
f"jobs[{i}].resources.storage is already defined, but " f"deprecated key 'pvc_size' is also present."
)
pvc_size = job['pvc_size']
try:
job_validator['resources']['storage'].validate(f"jobs[{i}].pvc_size", job['pvc_size'])
except ValidationError as e:
raise ValidationError(f"[pvc_size key is DEPRECATED. Use " f"resources.storage] {e.reason}") from e
resources = job.get('resources')
if resources is None:
resources = {}
job['resources'] = resources
resources['storage'] = pvc_size
del job['pvc_size']
if 'process' not in job:
process_keys = ['command', 'image']
if 'command' not in job or 'image' not in job:
raise ValidationError(
f'jobs[{i}].process is not defined, but '
f'deprecated keys {[k for k in process_keys if k not in job]} '
f'are not in jobs[{i}]'
)
command = job['command']
image = job['image']
try:
for k in process_keys:
job_validator['process']['docker'][k].validate(f"jobs[{i}].{k}", job[k])
except ValidationError as e:
raise ValidationError(
f"[command, image keys are "
f"DEPRECATED. Use process.command, process.image, "
f"with process.type = 'docker'.] "
f"{e.reason}"
) from e
job['process'] = {
'command': command,
'image': image,
'type': 'docker',
}
del job['command']
del job['image']
elif 'command' in job or 'image' in job:
raise ValidationError(
f"jobs[{i}].process is already defined, but "
f"deprecated keys 'command', 'image' "
f"are also present. "
f"Please remove deprecated keys."
)
mount_docker_socket = job['process'].pop('mount_docker_socket', False)
if mount_docker_socket:
raise ValidationError(
"mount_docker_socket is no longer supported but was set to True in request. Please upgrade."
)
if 'gcsfuse' in job:
job['cloudfuse'] = job.pop('gcsfuse')


def handle_job_backwards_compatibility(job):
if 'cloudfuse' in job:
job['gcsfuse'] = job.pop('cloudfuse')
if 'parent_ids' in job:
job['absolute_parent_ids'] = job.pop('parent_ids')
if 'always_copy_output' not in job:
job['always_copy_output'] = True
if 'process' in job:
process = job['process']
if process['type'] == 'jvm' and 'profile' not in process:
process['profile'] = False


def validate_batch(batch):
batch_validator.validate('batch', batch)


def validate_batch_update(update):
batch_update_validator.validate('batch_update', update)
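
A hypothetical end-to-end check of the deprecated-key handling above; the job spec, the '10Gi' storage string, and the import path are invented for illustration and assume the module is importable as batch.front_end.validate:

# Sketch only: a job written with the deprecated top-level keys should be
# rewritten into the 'process' / 'resources' form by validate_and_clean_jobs.
from batch.front_end.validate import validate_and_clean_jobs  # assumed import path

jobs = [
    {
        'job_id': 0,
        'command': ['echo', 'hello'],  # deprecated -> process/command
        'image': 'ubuntu:22.04',       # deprecated -> process/image
        'pvc_size': '10Gi',            # deprecated -> resources/storage
    }
]
validate_and_clean_jobs(jobs)
assert jobs[0]['process'] == {'command': ['echo', 'hello'], 'image': 'ubuntu:22.04', 'type': 'docker'}
assert jobs[0]['resources']['storage'] == '10Gi'
assert jobs[0]['always_copy_output'] is True  # defaulted by handle_job_backwards_compatibility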

blob_id: 9eb7830cb8fc23e1a73b9e6ec322a3d34313b1d9
directory_id: ee87c715e5d937b0380ddb87d56e9ebc4877a02b
path: /build_tools/circle/list_versions.py
content_id: 345e08b4bece4b83b735e6adf02990aa0d45d662
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: scikit-learn/scikit-learn
snapshot_id: 27a2196f3173e0f32f7a5c5d652b70a6c57c7644
revision_id: 061f8777b48e5491b0c57bb8e0bc7067c103079d
branch_name: refs/heads/main
visit_date: 2023-08-18T15:32:59.764468
revision_date: 2023-08-18T14:39:08
committer_date: 2023-08-18T14:39:08
github_id: 843,222
star_events_count: 58,456
fork_events_count: 29,777
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T19:08:34
gha_created_at: 2010-08-17T09:43:38
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,737
extension: py
filename: list_versions.py
content:
#!/usr/bin/env python3
# List all available versions of the documentation
import json
import re
import sys
from urllib.request import urlopen
from sklearn.utils.fixes import parse_version


def json_urlread(url):
try:
return json.loads(urlopen(url).read().decode("utf8"))
except Exception:
print("Error reading", url, file=sys.stderr)
raise


def human_readable_data_quantity(quantity, multiple=1024):
# https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
if quantity == 0:
quantity = +0
SUFFIXES = ["B"] + [i + {1000: "B", 1024: "iB"}[multiple] for i in "KMGTPEZY"]
for suffix in SUFFIXES:
if quantity < multiple or suffix == SUFFIXES[-1]:
if suffix == SUFFIXES[0]:
return "%d %s" % (quantity, suffix)
else:
return "%.1f %s" % (quantity, suffix)
else:
quantity /= multiple


def get_file_extension(version):
if "dev" in version:
# The 'dev' branch should be explicitly handled
return "zip"
current_version = parse_version(version)
min_zip_version = parse_version("0.24")
return "zip" if current_version >= min_zip_version else "pdf"


def get_file_size(version):
api_url = ROOT_URL + "%s/_downloads" % version
for path_details in json_urlread(api_url):
file_extension = get_file_extension(version)
file_path = f"scikit-learn-docs.{file_extension}"
if path_details["name"] == file_path:
return human_readable_data_quantity(path_details["size"], 1000)
print(":orphan:")
print()
heading = "Available documentation for Scikit-learn"
print(heading)
print("=" * len(heading))
print()
print("Web-based documentation is available for versions listed below:")
print()
ROOT_URL = (
"https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/" # noqa
)
RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html" # noqa
VERSION_RE = re.compile(r"scikit-learn ([\w\.\-]+) documentation</title>")
NAMED_DIRS = ["dev", "stable"]
# Gather data for each version directory, including symlinks
dirs = {}
symlinks = {}
root_listing = json_urlread(ROOT_URL)
for path_details in root_listing:
name = path_details["name"]
if not (name[:1].isdigit() or name in NAMED_DIRS):
continue
if path_details["type"] == "dir":
html = urlopen(RAW_FMT % name).read().decode("utf8")
version_num = VERSION_RE.search(html).group(1)
file_size = get_file_size(name)
dirs[name] = (version_num, file_size)
if path_details["type"] == "symlink":
symlinks[name] = json_urlread(path_details["_links"]["self"])["target"]
# Symlinks should have same data as target
for src, dst in symlinks.items():
if dst in dirs:
dirs[src] = dirs[dst]
# Output in order: dev, stable, decreasing other version
seen = set()
for name in NAMED_DIRS + sorted(
(k for k in dirs if k[:1].isdigit()), key=parse_version, reverse=True
):
version_num, file_size = dirs[name]
if version_num in seen:
# symlink came first
continue
else:
seen.add(version_num)
name_display = "" if name[:1].isdigit() else " (%s)" % name
path = "https://scikit-learn.org/%s/" % name
out = "* `Scikit-learn %s%s documentation <%s>`_" % (
version_num,
name_display,
path,
)
if file_size is not None:
file_extension = get_file_extension(version_num)
out += (
f" (`{file_extension.upper()} {file_size} <{path}/"
f"_downloads/scikit-learn-docs.{file_extension}>`_)"
)
print(out)
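
A few hypothetical spot-checks of the helpers above; the values are chosen only for illustration and assume the functions are in scope (e.g. imported from this module):

# Sketch only: expected outputs are shown in the comments.
print(human_readable_data_quantity(0))                # 0 B
print(human_readable_data_quantity(1536, 1024))       # 1.5 KiB
print(human_readable_data_quantity(3_737_000, 1000))  # 3.7 MB
print(get_file_extension("1.3.2"))     # zip (versions >= 0.24 ship a zip)
print(get_file_extension("0.20"))      # pdf (older docs ship a pdf)
print(get_file_extension("1.4.dev0"))  # zip (the dev branch is handled explicitly)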

blob_id: f81eb423e7ef7b3b93b810db68d55d56242e81d6
directory_id: 974d04d2ea27b1bba1c01015a98112d2afb78fe5
path: /test/xpu/test_c_concat.py
content_id: d2490aa3772dcd91f1d59af3073857314d6db0c3
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: PaddlePaddle/Paddle
snapshot_id: b3d2583119082c8e4b74331dacc4d39ed4d7cff0
revision_id: 22a11a60e0e3d10a3cf610077a3d9942a6f964cb
branch_name: refs/heads/develop
visit_date: 2023-08-17T21:27:30.568889
revision_date: 2023-08-17T12:38:22
committer_date: 2023-08-17T12:38:22
github_id: 65,711,522
star_events_count: 20,414
fork_events_count: 5,891
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:20:51
gha_created_at: 2016-08-15T06:59:08
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,543
extension: py
filename: test_c_concat.py
content:
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
from test_collective_base_xpu import TestDistBase
import paddle
from paddle.fluid import core

paddle.enable_static()


class XPUTestCConcatOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'c_concat'
self.use_dynamic_create_class = False
class TestConcatOp(TestDistBase):
def _setup_config(self):
pass
def test_concat(self, col_type="c_concat"):
self.check_with_place(
"collective_concat_op.py", col_type, self.in_type_str
)


support_types = get_xpu_op_support_types('c_concat')
for stype in support_types:
create_test_class(
globals(),
XPUTestCConcatOp,
stype,
ignore_device_version=[core.XPUVersion.XPU1],
)


if __name__ == '__main__':
unittest.main()
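
For reference, a hypothetical way to run only the tests this module generates, assuming an XPU-enabled Paddle build and that the file is importable as test_c_concat with its helper modules on the path:

# Sketch only: standard unittest loading by module name.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("test_c_concat")
unittest.TextTestRunner(verbosity=2).run(suite)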

blob_id: b9bf1075c75723e0de0ca40c75608bb05384462b
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/helpers/typeshed/stubs/openpyxl/openpyxl/writer/excel.pyi
content_id: 57e9d2ac1884bc1f24c0b6362d940510db66028c
detected_licenses: ["MIT", "Apache-2.0"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 358
extension: pyi
filename: excel.pyi
content:
from typing import Any

class ExcelWriter:
workbook: Any
manifest: Any
vba_modified: Any
def __init__(self, workbook, archive) -> None: ...
def write_data(self) -> None: ...
def write_worksheet(self, ws) -> None: ...
def save(self) -> None: ...

def save_workbook(workbook, filename): ...
def save_virtual_workbook(workbook): ...
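
A minimal sketch of the API this stub describes; it requires openpyxl to be installed, and the cell value and filename are arbitrary:

# Sketch only: create a workbook and save it via the function stubbed above.
from openpyxl import Workbook
from openpyxl.writer.excel import save_workbook

wb = Workbook()
wb.active["A1"] = "hello"
save_workbook(wb, "example.xlsx")  # returns True on success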