hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
344e54e490f2768644b1bab87e5d4d21e99bcac3 | 8,244 | py | Python | tests/extras/datasets/pandas/test_feather_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | tests/extras/datasets/pandas/test_feather_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | tests/extras/datasets/pandas/test_feather_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import PurePosixPath
import pandas as pd
import pytest
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from gcsfs import GCSFileSystem
from pandas.testing import assert_frame_equal
from s3fs.core import S3FileSystem
from kedro.extras.datasets.pandas import FeatherDataSet
from kedro.io import DataSetError
from kedro.io.core import Version
@pytest.fixture
def filepath_feather(tmp_path):
    """Return the path of a temporary ``test.feather`` file as a string."""
    feather_path = tmp_path / "test.feather"
    return str(feather_path)
@pytest.fixture
def feather_data_set(filepath_feather, load_args):
    """A non-versioned ``FeatherDataSet`` over the tmp file, with test load_args."""
    data_set = FeatherDataSet(filepath=filepath_feather, load_args=load_args)
    return data_set
@pytest.fixture
def versioned_feather_data_set(filepath_feather, load_version, save_version):
    """A versioned ``FeatherDataSet`` pinned to the given load/save versions."""
    version = Version(load_version, save_version)
    return FeatherDataSet(filepath=filepath_feather, version=version)
@pytest.fixture
def dummy_dataframe():
    """Small deterministic dataframe used as the save/load payload."""
    data = {"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]}
    return pd.DataFrame(data)
class TestFeatherDataSet:
    """Tests for the non-versioned ``FeatherDataSet``."""

    def test_save_and_load(self, feather_data_set, dummy_dataframe):
        """Test saving and reloading the data set."""
        feather_data_set.save(dummy_dataframe)
        reloaded = feather_data_set.load()
        assert_frame_equal(dummy_dataframe, reloaded)

    def test_exists(self, feather_data_set, dummy_dataframe):
        """Test `exists` method invocation for both existing and
        nonexistent data set."""
        assert not feather_data_set.exists()
        feather_data_set.save(dummy_dataframe)
        assert feather_data_set.exists()

    @pytest.mark.parametrize(
        "load_args", [{"k1": "v1", "index": "value"}], indirect=True
    )
    def test_load_extra_params(self, feather_data_set, load_args):
        """Test overriding the default load arguments."""
        for key, value in load_args.items():
            assert feather_data_set._load_args[key] == value

    def test_load_missing_file(self, feather_data_set):
        """Check the error when trying to load missing file."""
        pattern = r"Failed while loading data set FeatherDataSet\(.*\)"
        pattern = r"Failed while loading data from data set FeatherDataSet\(.*\)"
        with pytest.raises(DataSetError, match=pattern):
            feather_data_set.load()

    @pytest.mark.parametrize(
        "filepath,instance_type",
        [
            ("s3://bucket/file.feather", S3FileSystem),
            ("file:///tmp/test.feather", LocalFileSystem),
            ("/tmp/test.feather", LocalFileSystem),
            ("gcs://bucket/file.feather", GCSFileSystem),
            ("https://example.com/file.feather", HTTPFileSystem),
        ],
    )
    def test_protocol_usage(self, filepath, instance_type):
        """The filepath's protocol prefix must select the matching fsspec
        filesystem, and the stored ``_filepath`` must be protocol-stripped."""
        data_set = FeatherDataSet(filepath=filepath)
        assert isinstance(data_set._fs, instance_type)

        # _strip_protocol() doesn't strip http(s) protocol
        if data_set._protocol == "https":
            path = filepath.split("://")[-1]
        else:
            path = data_set._fs._strip_protocol(filepath)

        assert str(data_set._filepath) == path
        assert isinstance(data_set._filepath, PurePosixPath)

    def test_catalog_release(self, mocker):
        """``release()`` must invalidate the fsspec cache entry for the path."""
        fs_mock = mocker.patch("fsspec.filesystem").return_value
        filepath = "test.feather"
        data_set = FeatherDataSet(filepath=filepath)
        data_set.release()
        fs_mock.invalidate_cache.assert_called_once_with(filepath)
class TestFeatherDataSetVersioned:
    """Tests for ``FeatherDataSet`` with versioning enabled."""

    def test_version_str_repr(self, load_version, save_version):
        """Test that version is in string representation of the class instance
        when applicable."""
        filepath = "test.feather"
        ds = FeatherDataSet(filepath=filepath)
        ds_versioned = FeatherDataSet(
            filepath=filepath, version=Version(load_version, save_version)
        )
        assert filepath in str(ds)
        assert "version" not in str(ds)
        assert filepath in str(ds_versioned)
        # load is rendered unquoted (it may be None), save is always a string.
        ver_str = "version=Version(load={}, save='{}')".format(
            load_version, save_version
        )
        assert ver_str in str(ds_versioned)
        assert "FeatherDataSet" in str(ds_versioned)
        assert "FeatherDataSet" in str(ds)
        assert "protocol" in str(ds_versioned)
        assert "protocol" in str(ds)

    def test_save_and_load(self, versioned_feather_data_set, dummy_dataframe):
        """Test that saved and reloaded data matches the original one for
        the versioned data set."""
        versioned_feather_data_set.save(dummy_dataframe)
        reloaded_df = versioned_feather_data_set.load()
        assert_frame_equal(dummy_dataframe, reloaded_df)

    def test_no_versions(self, versioned_feather_data_set):
        """Check the error if no versions are available for load."""
        pattern = r"Did not find any versions for FeatherDataSet\(.+\)"
        with pytest.raises(DataSetError, match=pattern):
            versioned_feather_data_set.load()

    def test_exists(self, versioned_feather_data_set, dummy_dataframe):
        """Test `exists` method invocation for versioned data set."""
        assert not versioned_feather_data_set.exists()
        versioned_feather_data_set.save(dummy_dataframe)
        assert versioned_feather_data_set.exists()

    def test_prevent_overwrite(self, versioned_feather_data_set, dummy_dataframe):
        """Check the error when attempting to overwrite the data set if the
        corresponding feather file for a given save version already exists."""
        versioned_feather_data_set.save(dummy_dataframe)
        pattern = (
            r"Save path \`.+\` for FeatherDataSet\(.+\) must "
            r"not exist if versioning is enabled\."
        )
        with pytest.raises(DataSetError, match=pattern):
            versioned_feather_data_set.save(dummy_dataframe)

    @pytest.mark.parametrize(
        "load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
    )
    @pytest.mark.parametrize(
        "save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
    )
    def test_save_version_warning(
        self, versioned_feather_data_set, load_version, save_version, dummy_dataframe
    ):
        """Check the warning when saving to the path that differs from
        the subsequent load path."""
        pattern = (
            r"Save version `{0}` did not match load version `{1}` "
            r"for FeatherDataSet\(.+\)".format(save_version, load_version)
        )
        with pytest.warns(UserWarning, match=pattern):
            versioned_feather_data_set.save(dummy_dataframe)

    def test_http_filesystem_no_versioning(self):
        """Versioning must be rejected for HTTP(s)-backed filepaths."""
        pattern = r"HTTP\(s\) DataSet doesn't support versioning\."
        with pytest.raises(DataSetError, match=pattern):
            FeatherDataSet(
                filepath="https://example.com/file.feather", version=Version(None, None)
            )
| 40.811881 | 88 | 0.700631 |
16c621a7ecc71bf0a69eed6a66559507064bdc34 | 650 | py | Python | scripts/python/genjson.py | akvo/flowvis | 9dae54992cf8bda9e172fbf0eefcf172607746c9 | [
"Unlicense"
] | null | null | null | scripts/python/genjson.py | akvo/flowvis | 9dae54992cf8bda9e172fbf0eefcf172607746c9 | [
"Unlicense"
] | null | null | null | scripts/python/genjson.py | akvo/flowvis | 9dae54992cf8bda9e172fbf0eefcf172607746c9 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# Parses data in csv form to json
import csv
import itertools
import json
import sys


def parse_rows(csv_file, count):
    """Read up to ``count`` data rows from an open CSV file.

    The first line is consumed as the header; surrounding double quotes
    and trailing whitespace are stripped from each field name.  Every row
    becomes a dict in which the ``Instance`` column is renamed ``id`` and
    every other column name has its first character lower-cased.
    """
    fieldnames = [n.lstrip('"').rstrip().rstrip('"')
                  for n in csv_file.readline().split(",")]
    reader = csv.DictReader(csv_file, fieldnames=fieldnames)
    out = []
    # islice stops cleanly at end-of-file instead of raising StopIteration
    # as the original unguarded ``reader.next()`` loop did on short files.
    for row in itertools.islice(reader, count):
        od = {}
        for k, v in row.items():
            if k == "Instance":
                od["id"] = v
            else:
                od[k[0].lower() + k[1:]] = v
        out.append(od)
    return out


def main(argv=None):
    """CLI entry point: ``genjson.py [filename] [count]``.

    Bug fix: the original used the Python-2-only ``print`` statement and
    ``reader.next()``, both syntax/runtime errors under Python 3.
    """
    argv = sys.argv if argv is None else argv
    filename = argv[1] if len(argv) > 1 else "data/liberia-data.csv"
    count = int(argv[2]) if len(argv) > 2 else 10
    # ``with`` closes the file, which the original script never did.
    with open(filename, "r") as f:
        print(json.dumps(parse_rows(f, count), indent=True))


if __name__ == "__main__":
    main()
| 21.666667 | 82 | 0.578462 |
0d3638c02eea8f2809c3acc0d4c649140584b12b | 3,508 | py | Python | parlai/scripts/token_stats.py | terrorizer1980/ParlAI | f8fda24bd11804104b0a91aa84e170d3efbd8983 | [
"MIT"
] | 2 | 2020-08-27T05:21:14.000Z | 2020-09-29T14:34:09.000Z | parlai/scripts/token_stats.py | terrorizer1980/ParlAI | f8fda24bd11804104b0a91aa84e170d3efbd8983 | [
"MIT"
] | 316 | 2021-03-19T14:53:31.000Z | 2022-03-27T03:36:51.000Z | parlai/scripts/token_stats.py | terrorizer1980/ParlAI | f8fda24bd11804104b0a91aa84e170d3efbd8983 | [
"MIT"
] | 2 | 2020-10-29T18:14:33.000Z | 2020-11-07T09:46:23.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from parlai.core.script import ParlaiScript
from parlai.core.agents import create_agent
from parlai.core.torch_agent import TorchAgent
from parlai.core.worlds import create_task
from parlai.core.params import ParlaiParser
from parlai.utils.misc import TimeLogger, nice_report
import parlai.utils.logging as logging
class TokenStats(ParlaiScript):
    """Compute token-length statistics for a field of a ParlAI task."""

    @classmethod
    def setup_args(cls):
        """Build the argument parser for the token-stats script."""
        parser = ParlaiParser(True, True, description='Compute tokenized stats.')
        parser.add_argument('--num-examples', '-n', type=int, default=-1)
        parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
        parser.add_argument('--field', default='text')
        parser.add_argument('--final-only', type='bool', default=False)
        parser.set_defaults(
            no_cuda=True, model='test_agents/null', datatype='train:stream:ordered'
        )
        return parser

    def _compute_stats(self, lengths):
        """Return summary statistics (count, extremes, quantiles) of *lengths*."""
        lengths = np.array(lengths)
        return {
            "exs": len(lengths),
            "min": np.min(lengths),
            "max": np.max(lengths),
            # bug fix: the "mean" key was previously computed with np.median
            "mean": np.mean(lengths),
            # bug fix: p01 was previously computed with quantile 0.05
            "p01": np.quantile(lengths, 0.01),
            "p05": np.quantile(lengths, 0.05),
            "p10": np.quantile(lengths, 0.10),
            "p25": np.quantile(lengths, 0.25),
            "p50": np.quantile(lengths, 0.50),
            "p75": np.quantile(lengths, 0.75),
            "p90": np.quantile(lengths, 0.90),
            "p95": np.quantile(lengths, 0.95),
            "p99": np.quantile(lengths, 0.99),
            # fraction of examples that fit in a 128-token budget
            "p@128": np.mean(lengths <= 128),
        }

    def run(self):
        """Stream the task, tokenizing with the model's dict agent, and
        print length statistics for the configured field."""
        self.opt['no_cuda'] = True
        # Force deterministic, one-pass iteration over the training data.
        if 'ordered' not in self.opt['datatype'] and 'train' in self.opt['datatype']:
            self.opt['datatype'] = self.opt['datatype'] + ':ordered'
        agent = create_agent(self.opt)
        agent.opt.log()
        num_examples = self.opt['num_examples']
        field = self.opt['field'] + '_vec'
        if num_examples < 0:
            num_examples = float('inf')
        assert self.opt['batchsize'] == 1
        assert isinstance(agent, TorchAgent)
        world = create_task(self.opt, agent)
        teacher = world.get_task_agent()

        # set up logging
        log_every_n_secs = self.opt.get('log_every_n_secs', -1)
        if log_every_n_secs <= 0:
            log_every_n_secs = float('inf')
        log_time = TimeLogger()

        lengths = []
        cnt = 0
        total = min(teacher.num_examples(), num_examples)
        while not teacher.epoch_done() and cnt < num_examples:
            act = teacher.act()
            processed = agent.observe(act)
            text_vec = processed[field]
            # --final-only restricts stats to the last turn of each episode.
            if text_vec is not None and (
                not self.opt['final_only'] or act.get('episode_done')
            ):
                cnt += 1
                lengths.append(float(len(text_vec)))
            agent.self_observe({})
            if log_time.time() > log_every_n_secs:
                report = self._compute_stats(lengths)
                text, report = log_time.log(report['exs'], total, report)
                logging.info(text)

        report = self._compute_stats(lengths)
        print(nice_report(report))
# Script entry point: parse CLI args via ParlaiParser and run the stats job.
if __name__ == '__main__':
    TokenStats.main()
| 36.164948 | 85 | 0.595496 |
429096d51d979797728e3645332676cc03000395 | 4,511 | py | Python | nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z | nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 40 | 2015-04-13T02:32:42.000Z | 2022-02-16T02:28:06.000Z | nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack.compute import console_auth_tokens \
as console_auth_tokens_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
class ConsoleAuthTokensExtensionTestV21(test.NoDBTestCase):
    """Tests for the console-auth-tokens API controller at microversion 2.1,
    where only rdp-html5 console types may be queried."""

    controller_class = console_auth_tokens_v21

    # Expected response body as built from string-typed inputs.
    _EXPECTED_OUTPUT = {'console': {'instance_uuid': fakes.FAKE_UUID,
                                    'host': 'fake_host',
                                    'port': '1234',
                                    'internal_access_path':
                                        'fake_access_path'}}

    # The database backend returns a ConsoleAuthToken.to_dict() and o.vo
    # StringField are unicode. And the port is an IntegerField.
    _EXPECTED_OUTPUT_DB = copy.deepcopy(_EXPECTED_OUTPUT)
    _EXPECTED_OUTPUT_DB['console'].update(
        {'host': u'fake_host', 'port': 1234,
         'internal_access_path': u'fake_access_path'})

    def setUp(self):
        super(ConsoleAuthTokensExtensionTestV21, self).setUp()
        self.controller = self.controller_class.ConsoleAuthTokensController()
        self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.context = self.req.environ['nova.context']

    @mock.patch('nova.objects.ConsoleAuthToken.validate',
                return_value=objects.ConsoleAuthToken(
                    instance_uuid=fakes.FAKE_UUID, host='fake_host',
                    port='1234', internal_access_path='fake_access_path',
                    console_type='rdp-html5', token=fakes.FAKE_UUID))
    def test_get_console_connect_info(self, mock_validate):
        """A valid rdp-html5 token returns the connect info."""
        output = self.controller.show(self.req, fakes.FAKE_UUID)
        self.assertEqual(self._EXPECTED_OUTPUT_DB, output)
        mock_validate.assert_called_once_with(self.context, fakes.FAKE_UUID)

    @mock.patch('nova.objects.ConsoleAuthToken.validate',
                side_effect=exception.InvalidToken(token='***'))
    def test_get_console_connect_info_token_not_found(self, mock_validate):
        """An invalid token maps to HTTP 404."""
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, self.req, fakes.FAKE_UUID)
        mock_validate.assert_called_once_with(self.context, fakes.FAKE_UUID)

    @mock.patch('nova.objects.ConsoleAuthToken.validate',
                return_value=objects.ConsoleAuthToken(
                    instance_uuid=fakes.FAKE_UUID, host='fake_host',
                    port='1234', internal_access_path='fake_access_path',
                    console_type='unauthorized_console_type',
                    token=fakes.FAKE_UUID))
    def test_get_console_connect_info_nonrdp_console_type(self, mock_validate):
        """Non-rdp console types are rejected with HTTP 401 before 2.31."""
        self.assertRaises(webob.exc.HTTPUnauthorized,
                          self.controller.show, self.req, fakes.FAKE_UUID)
        mock_validate.assert_called_once_with(self.context, fakes.FAKE_UUID)
class ConsoleAuthTokensExtensionTestV231(ConsoleAuthTokensExtensionTestV21):
    """Re-runs the 2.1 tests under microversion 2.31, where non-RDP console
    types (e.g. webmks) are also authorized."""

    def setUp(self):
        super(ConsoleAuthTokensExtensionTestV231, self).setUp()
        self.req.api_version_request = api_version_request.APIVersionRequest(
            '2.31')

    # Consistency fix: removed PEP8-violating spaces around '=' in the
    # keyword argument, matching the decorators in the parent class.
    @mock.patch('nova.objects.ConsoleAuthToken.validate',
                return_value=objects.ConsoleAuthToken(
                    instance_uuid=fakes.FAKE_UUID, host='fake_host',
                    port='1234', internal_access_path='fake_access_path',
                    console_type='webmks',
                    token=fakes.FAKE_UUID))
    def test_get_console_connect_info_nonrdp_console_type(self, mock_validate):
        """From 2.31 on, a webmks console returns the connect info."""
        output = self.controller.show(self.req, fakes.FAKE_UUID)
        self.assertEqual(self._EXPECTED_OUTPUT_DB, output)
        mock_validate.assert_called_once_with(self.context, fakes.FAKE_UUID)
| 46.030612 | 79 | 0.685879 |
784a30f2da2aba495b231252bbdab44f5b31a20b | 17,069 | py | Python | python/connect_c.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 17 | 2020-04-28T09:18:28.000Z | 2021-12-28T08:38:00.000Z | python/connect_c.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T04:10:51.000Z | 2022-03-31T05:28:25.000Z | python/connect_c.py | goten-team/Goten | 690f1429b62c70caec72f4010ee5b7a9786f0d25 | [
"MIT"
] | 2 | 2021-09-26T05:06:17.000Z | 2021-12-14T16:25:06.000Z | #!usr/bin/env python
from __future__ import print_function
from multiprocessing.pool import ThreadPool
import numpy as np
import torch
from python.basic_utils import str_hash
from python.enclave_interfaces import GlobalTensor, SecretEnum
from python.quantize_net import swalp_quantize, NamedParam, dequantize_op
from python.timer_utils import NamedTimer, NamedTimerInstance
from python.common_torch import SecretConfig, mod_on_cpu, get_random_uniform, GlobalParam, \
quantize, generate_unquantized_tensor, dequantize, mod_move_down
from python.linear_shares import TensorLoader, conv2d_weight_grad_op
from python.torch_utils import compare_expected_actual
np.random.seed(123)
# Layer geometry for the single-layer test. The first assignment is a larger
# configuration that is immediately overridden by the second (smaller) one;
# only the second set of sizes is actually used.
minibatch, inChan, outChan, imgHw, filHw = 64, 128, 128, 16, 3
minibatch, inChan, outChan, imgHw, filHw = 64, 64, 3, 32, 3
xShape = [minibatch, inChan, imgHw, imgHw]  # input activation shape
wShape = [outChan, inChan, filHw, filHw]    # conv weight shape

# Both ops take (weight, input); padding=1 keeps the spatial size for a
# 3x3 kernel. Only ConvOp is exercised below.
ConvOp = lambda w, x: torch.conv2d(x, w, padding=1)
MatmOp = lambda w, x: torch.mm(w, x)
TargetOp = ConvOp

# AQ: quantized weight (uniform in [0, PrimeLimit)); A0/A1 are its additive
# shares, zero-initialized here and filled later by the enclave.
AQ = get_random_uniform(SecretConfig.PrimeLimit, wShape).type(SecretConfig.dtypeForCpuOp)
A0 = torch.zeros(AQ.size()).type(SecretConfig.dtypeForCpuOp)
A1 = torch.zeros(AQ.size()).type(SecretConfig.dtypeForCpuOp)
# BQ: quantized input split into shares so that B0 + B1 == BQ (mod PrimeLimit).
BQ = get_random_uniform(SecretConfig.PrimeLimit, xShape).type(SecretConfig.dtypeForCpuOp)
B0 = get_random_uniform(SecretConfig.PrimeLimit, xShape).type(SecretConfig.dtypeForCpuOp)
B1 = mod_on_cpu(BQ - B0)
# idealC: ground-truth output, computed in double precision and reduced
# mod PrimeLimit to avoid float32 rounding in the reference.
idealC = mod_on_cpu(TargetOp(AQ.type(torch.double), BQ.type(torch.double))).type(SecretConfig.dtypeForCpuOp)
yShape = list(idealC.size())
# C0/C1/Z: output sharing such that C0 + C1 + Z == idealC (mod PrimeLimit).
C0 = get_random_uniform(SecretConfig.PrimeLimit, yShape).type(SecretConfig.dtypeForCpuOp)
C1 = get_random_uniform(SecretConfig.PrimeLimit, yShape).type(SecretConfig.dtypeForCpuOp)
Z = mod_on_cpu(idealC - C0 - C1)
class EnclaveInterfaceTester(TensorLoader):
    def __init__(self):
        """Create a loader for a single test layer.

        ``LayerId`` is derived from the layer name so that enclave-side
        tensors belonging to different layers never collide.
        """
        super().__init__()
        self.Name = "SingleLayer"
        self.LayerId = str_hash(self.Name)
        self.Sid = 0
def name_modifier(self, name):
return self.Name + "--" + str(name)
    def init_test(self):
        """Initialise the global enclave state and bind its eid to this loader."""
        print()
        GlobalTensor.init()
        self.set_eid(GlobalTensor.get_eid())
    def test_tensor(self):
        """End-to-end smoke test of the enclave tensor API.

        Exercises, in order: enclave tensor allocation, the AES
        encrypt/decrypt round trip, seeded random generation (the same
        seed must yield the same stream inside and outside the enclave),
        share extraction, and reconstruction of the blinded output so
        that C0 + C1 + Z == conv(AQ, BQ) (mod PrimeLimit).
        """
        print()
        print("minibatch, inChan, outChan, imgHw, filHw = %d, %d, %d, %d, %d"
              % (minibatch, inChan, outChan, imgHw, filHw))
        print("wShape", wShape)
        print("xShape", xShape)
        print("yShape", yShape)
        print()
        # Tensor allocation inside the enclave.
        NamedTimer.start("InitTensor")
        self.init_enclave_tensor("BQ", BQ.size())
        NamedTimer.end("InitTensor")
        print()
        NamedTimer.start("Preprare Decrypt")
        C0Enc = self.create_encrypt_torch(C0.size())
        NamedTimer.end("Preprare Decrypt")
        C0Rec = torch.zeros(C0.size()).type(SecretConfig.dtypeForCpuOp)
        # AES Encryption and Decryption round trip: C0 -> C0Enc -> C0Rec.
        NamedTimer.start("AesEncrypt")
        self.aes_encrypt(C0, C0Enc)
        NamedTimer.end("AesEncrypt")
        NamedTimer.start("AesDecrypt")
        self.aes_decrypt(C0Enc, C0Rec)
        NamedTimer.end("AesDecrypt")
        print("Error of Enc and Dec:", compare_expected_actual(C0, C0Rec))
        print()
        # Allocate the operand tensors and their shares in the enclave.
        self.init_enclave_tensor("AQ", AQ.size())
        self.init_enclave_tensor("A0", A0.size())
        self.init_enclave_tensor("A1", A1.size())
        self.init_enclave_tensor("BQ", BQ.size())
        self.init_enclave_tensor("B0", B0.size())
        self.init_enclave_tensor("B1", B1.size())
        NamedTimer.start("SetTen")
        self.set_tensor("AQ", AQ)
        NamedTimer.end("SetTen")
        # Test the Random Generation
        NamedTimer.start("GenRandomUniform: x (A)");
        get_random_uniform(SecretConfig.PrimeLimit, xShape).type(SecretConfig.dtypeForCpuOp)
        NamedTimer.end("GenRandomUniform: x (A)");
        npAQ = AQ.numpy()
        print("PrimeLimit:", SecretConfig.PrimeLimit)
        print("Python Rand max, min, avg:", np.max(npAQ), np.min(npAQ), np.average(npAQ))
        # A0 and A1 should have the same PRG (seed "A0"); likewise the
        # B-side tensors all share seed "B0".
        self.set_seed("AQ", "A0")
        self.set_seed("A0", "A0")
        self.set_seed("A1", "A0")
        self.set_seed("BQ", "B0")
        self.set_seed("B0", "B0")
        self.set_seed("B1", "B0")
        NamedTimer.start("SetTen")
        self.set_tensor("AQ", AQ)
        NamedTimer.end("SetTen")
        NamedTimer.start("GetRandom")
        # self.GetRandom("A0", A0)
        self.get_random("B0", B0)
        NamedTimer.end("GetRandom")
        self.set_tensor("BQ", BQ)
        # NOTE(review): `args` and `pool` are currently unused -- the share
        # extraction below runs sequentially despite the thread pool.
        args = [("AQ", A1, "A0"), ("BQ", B1, "B0")]
        with ThreadPool(2) as pool:
            NamedTimer.start("GetShare")
            self.get_share("AQ", A1, "A0")
            self.get_share("BQ", B1, "B0")
            NamedTimer.end("GetShare")
        # B0 came from the same seeded stream, so B0 + B1 must rebuild BQ.
        BQRec = mod_on_cpu(B1 + B0)
        # MoveDown(BQ)
        print(BQ[0, 0, 0, 0])
        print(BQRec[0, 0, 0, 0])
        print("Same Seed Error:", compare_expected_actual(BQ, BQRec))
        print()
        print("Reconstruct Shared C")
        CRecon = torch.zeros(C0.size())
        self.init_enclave_tensor("C0", C0.size())
        self.init_enclave_tensor("C1", C1.size())
        self.init_enclave_tensor("Z", Z.size())
        self.init_enclave_tensor("CRecon", CRecon.size())
        self.set_tensor("C0", C0)
        self.set_tensor("C1", C1)
        self.set_tensor("Z", Z)
        # Warm-up call, then the timed reconstruction C0 + C1 + Z.
        self.enclave_recon("C0", "C1", "Z", "CRecon")
        NamedTimer.start("Recon")
        self.enclave_recon("C0", "C1", "Z", "CRecon")
        NamedTimer.end("Recon")
        NamedTimer.start("GetTen")
        self.get_tensor("CRecon", CRecon)
        NamedTimer.end("GetTen")
        print("C Recon Error:", compare_expected_actual(idealC, CRecon))
        # Same reconstruction via the SIMD three-operand add.
        NamedTimer.start("SimdRecon")
        self.enclave_add3("C0", "Z", "C1", "CRecon")
        NamedTimer.end("SimdRecon")
        self.get_tensor("CRecon", CRecon)
        # Map negative residues back into [0, PrimeLimit) before comparing.
        npReal = CRecon.numpy()
        npReal[npReal < 0] += SecretConfig.PrimeLimit
        print("C SimRecon Error:", compare_expected_actual(idealC, CRecon))
    def test_plain_compute(self):
        """Time the unprotected (no-enclave) computation as a baseline."""
        print()
        with NamedTimerInstance("Time of Plain Computation"):
            TargetOp(AQ, BQ)
    def test_async_test(self):
        """Benchmark the batched ``async_task`` call against four
        sequential ``get_random``/``get_share`` invocations."""
        print()
        x_shape = [512, 64, 32, 32]
        w_shape = x_shape

        def init_set(n):
            # Allocate enclave + cpu tensors for name `n` and seed its PRG
            # with its own name.
            self.init_enclave_tensor(n, w_shape)
            self.generate_cpu_tensor(n, w_shape)
            self.set_seed(n, n)

        init_set("AQ")
        init_set("BQ")
        init_set("CQ")
        init_set("DQ")
        name1, tensor1 = "AQ", self.get_cpu("AQ")
        name2, tensor2 = "BQ", self.get_cpu("BQ")
        name3, tensor3 = "CQ", self.get_cpu("CQ")
        name4, tensor4 = "DQ", self.get_cpu("DQ")
        # Sequential baselines.
        with NamedTimerInstance("GetRandom * 4"):
            self.get_random("AQ", self.get_cpu("AQ"))
            self.get_random("BQ", self.get_cpu("BQ"))
            self.get_random("CQ", self.get_cpu("CQ"))
            self.get_random("DQ", self.get_cpu("DQ"))
        with NamedTimerInstance("GetShare * 4"):
            self.get_share("AQ", self.get_cpu("AQ"), "AQ")
            self.get_share("BQ", self.get_cpu("BQ"), "BQ")
            self.get_share("CQ", self.get_cpu("CQ"), "CQ")
            self.get_share("DQ", self.get_cpu("DQ"), "DQ")
        # One batched enclave call covering all four tensors.
        with NamedTimerInstance("AsyncTask"):
            self.async_task(name1, tensor1, name1,
                            name2, tensor2, name2,
                            name3, tensor3, name3,
                            name4, tensor4, name4)
        print(torch.sum(self.get_cpu("AQ")))
        print(torch.sum(self.get_cpu("BQ")))
def test_async_task(self):
print()
x_shape = [512, 64, 32, 32]
w_shape = x_shape
def init_set(name, seed, shape):
self.init_enclave_tensor(name, w_shape)
self.generate_cpu_tensor(name, shape)
self.set_seed(name, seed)
init_set("AQ", "AQ", w_shape)
init_set("BQ", "AQ", w_shape)
with NamedTimerInstance("AsyncGetShare"):
get_share_id = self.async_get_share("AQ", self.get_cpu("AQ"), "AQ")
get_random_id = self.async_get_random("BQ", self.get_cpu("BQ"), "AQ")
check_counter = 0
wanted_id = [get_share_id, get_random_id]
with NamedTimerInstance("GetStatus"):
while len(wanted_id) > 0:
check_counter += 1
to_be_removed = []
for id in wanted_id:
status = self.get_task_status(id)
if status:
to_be_removed.append(id)
for id in to_be_removed:
wanted_id.remove(id)
print("check_counter: ", check_counter)
print(torch.sum(self.get_cpu("AQ") + self.get_cpu("BQ")))
    def test_fused_share(self, input_af, input_bf):
        """Compare the fused quantize+share enclave ops with the unfused path.

        For both operands the enclave first produces blinded shares with
        plain ``get_share`` (reference, ``*_nonfused``), then with
        ``fused_quantize_share`` and ``fused_quantize_share2``; the fused
        results must match the references element-wise.
        """
        x_shape = input_af.shape
        w_shape = input_bf.shape
        # Operand A tensors: float input Af, quantized AQ, shares A0/A1,
        # blinded share E and blinding mask U.
        self.init_enclave_tensor("Af", x_shape)
        self.init_enclave_tensor("AQ", x_shape)
        self.init_enclave_tensor("A0", x_shape)
        self.init_enclave_tensor("A1", x_shape)
        self.init_enclave_tensor("E", x_shape)
        self.init_enclave_tensor("U", x_shape)
        self.generate_cpu_tensor("Af", x_shape)
        self.generate_cpu_tensor("A1", x_shape)
        self.generate_cpu_tensor("E", x_shape)
        self.generate_cpu_tensor("U", x_shape)
        self.generate_cpu_tensor("E_nonfused", x_shape)
        self.generate_cpu_tensor("A1_nonfused", x_shape)
        # Pair the PRG streams so fused and unfused paths draw identical
        # randomness for the same logical mask.
        self.set_seed("Af", "A0")
        self.set_seed("Af", "U")
        self.set_seed("AQ", "A0")
        self.set_seed("AQ", "U")
        # Operand B mirrors the A-side layout (F/V instead of E/U).
        self.init_enclave_tensor("Bf", w_shape)
        self.init_enclave_tensor("BQ", w_shape)
        self.init_enclave_tensor("B0", w_shape)
        self.init_enclave_tensor("B1", w_shape)
        self.init_enclave_tensor("F", w_shape)
        self.init_enclave_tensor("V", w_shape)
        self.generate_cpu_tensor("Bf", w_shape)
        self.generate_cpu_tensor("B1", w_shape)
        self.generate_cpu_tensor("F", w_shape)
        self.generate_cpu_tensor("V", w_shape)
        self.generate_cpu_tensor("F_nonfused", w_shape)
        self.generate_cpu_tensor("B1_nonfused", w_shape)
        self.set_seed("Bf", "B0")
        self.set_seed("Bf", "V")
        self.set_seed("BQ", "B0")
        self.set_seed("BQ", "V")
        print("allocated tensors")

        def quantize_a():
            # Load the float inputs into the enclave and compute the
            # CPU-side quantized references AQ/BQ.
            self.set_cpu("Af", input_af)
            self.transfer_cpu_to_enclave("Af")
            self.set_cpu("AQ", swalp_quantize(NamedParam("Af", self.get_cpu("Af"))))
            self.set_cpu("Bf", input_bf)
            self.transfer_cpu_to_enclave("Bf")
            self.set_cpu("BQ", swalp_quantize(NamedParam("Bf", self.get_cpu("Bf"))))

        quantize_a()
        print("Initialized tensors for fused share")
        # Reference: four unfused async share extractions.
        task_ids = []
        with NamedTimerInstance("get_share x 4"):
            task_ids.append(self.async_get_share("AQ", self.get_cpu("E_nonfused"), "U"))
            task_ids.append(self.async_get_share("AQ", self.get_cpu("A1_nonfused"), "A0"))
            task_ids.append(self.async_get_share("BQ", self.get_cpu("F_nonfused"), "V"))
            task_ids.append(self.async_get_share("BQ", self.get_cpu("B1_nonfused"), "B0"))
            for id in task_ids:
                while not self.get_task_status(id):
                    pass
        # Fused path: quantize and share in one enclave call each.
        task_ids = []
        with NamedTimerInstance("fused_quantize_share x 4"):
            task_ids.append(self.fused_quantize_share("Af", "E", "Af", "U", is_async=True))
            task_ids.append(self.fused_quantize_share("Af", "A1", "Af", "A0", is_async=True))
            task_ids.append(self.fused_quantize_share("Bf", "F", "Bf", "V", is_async=True))
            task_ids.append(self.fused_quantize_share("Bf", "B1", "Bf", "B0", is_async=True))
            for id in task_ids:
                while not self.get_task_status(id):
                    pass
        # Fused results must match the unfused references.
        compare_expected_actual(self.get_cpu("E_nonfused"), self.get_cpu("E"), get_relative=True, verbose=True)
        compare_expected_actual(self.get_cpu("F_nonfused"), self.get_cpu("F"), get_relative=True, verbose=True)
        # Re-quantize, then repeat with the two-output fused op.
        quantize_a()
        print("Initialized tensors for fused share 2")
        task_ids = []
        with NamedTimerInstance("get_share x 4"):
            task_ids.append(self.async_get_share("AQ", self.get_cpu("E_nonfused"), "U"))
            task_ids.append(self.async_get_share("AQ", self.get_cpu("A1_nonfused"), "A0"))
            task_ids.append(self.async_get_share("BQ", self.get_cpu("F_nonfused"), "V"))
            task_ids.append(self.async_get_share("BQ", self.get_cpu("B1_nonfused"), "B0"))
            for id in task_ids:
                while not self.get_task_status(id):
                    pass
        task_ids = []
        with NamedTimerInstance("fused_quantize_share2 x 2"):
            task_ids.append(self.fused_quantize_share2("Af", "A1", "E", "Af", "A0", "U", is_async=True))
            task_ids.append(self.fused_quantize_share2("Bf", "B1", "F", "Bf", "B0", "V", is_async=True))
            for id in task_ids:
                while not self.get_task_status(id):
                    pass
        compare_expected_actual(self.get_cpu("E_nonfused"), self.get_cpu("E"), get_relative=True, verbose=True)
        compare_expected_actual(self.get_cpu("F_nonfused"), self.get_cpu("F"), get_relative=True, verbose=True)
        compare_expected_actual(self.get_cpu("A1_nonfused"), self.get_cpu("A1"), get_relative=True, verbose=True)
        compare_expected_actual(self.get_cpu("B1_nonfused"), self.get_cpu("B1"), get_relative=True, verbose=True)
def marshal_fused_share(self):
def give_test(x, w):
self.test_fused_share(x, w)
x_shape = [512, 64, 32, 32]
dtype = SecretConfig.dtypeForCpuOp
give_test(torch.from_numpy(np.random.uniform(-18, 2.0 ** (127), size=x_shape)).type(dtype),
torch.from_numpy(np.random.uniform(-18, 2.0 ** (127), size=x_shape)).type(dtype),)
give_test(get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(dtype),
get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(dtype), )
    def test_fused_recon(self):
        # test: Cf ~= deQ(C' + Ci)
        """Check the fused reconstruction path: dequantizing the modular sum of
        the two output shares must match a reference dequantization."""
        x_shape = [512, 64, 32, 32]
        w_shape = [64, 64, 3, 3]
        y_shape = [512, 64, 32, 32]
        layer_name_op = "dummyLayer"

        # --- Operand A: quantize on CPU as reference, then fused-share in enclave.
        self.init_enclave_tensor("Af", x_shape)
        self.init_enclave_tensor("E", x_shape)
        self.generate_cpu_tensor("E", x_shape)
        self.generate_cpu_tensor("AQ", x_shape)
        self.generate_cpu_tensor("Af", x_shape)
        self.set_seed("Af", "U")
        self.set_cpu("Af", get_random_uniform(1000, x_shape).type(SecretConfig.dtypeForCpuOp))
        self.transfer_cpu_to_enclave("Af")
        self.set_cpu("AQ", swalp_quantize(NamedParam(layer_name_op + "X", self.get_cpu("Af"))))
        self.fused_quantize_share("Af", "E", "Af", "U", is_async=False)

        # --- Operand B: same preparation as A, on the weight shape.
        self.init_enclave_tensor("Bf", w_shape)
        self.init_enclave_tensor("F", w_shape)
        self.generate_cpu_tensor("F", w_shape)
        self.generate_cpu_tensor("BQ", w_shape)
        self.generate_cpu_tensor("Bf", w_shape)
        self.set_seed("Bf", "V")
        self.set_cpu("Bf", get_random_uniform(1000, w_shape).type(SecretConfig.dtypeForCpuOp))
        self.transfer_cpu_to_enclave("Bf")
        self.set_cpu("BQ", swalp_quantize(NamedParam(layer_name_op + "Y", self.get_cpu("Bf"))))
        self.fused_quantize_share("Bf", "F", "Bf", "V", is_async=False)

        # --- Output shares: freeze known random CQ/Ci so the expected value
        # can be recomputed on the CPU side below.
        self.init_enclave_tensor("Cf", y_shape)
        self.init_enclave_tensor("CQ", y_shape)
        self.init_enclave_tensor("Ci", y_shape)
        self.generate_cpu_tensor("Cf", y_shape)
        self.generate_cpu_tensor("CQ", y_shape)
        self.generate_cpu_tensor("Ci", y_shape)
        # NOTE(review): frozen_cq/frozen_ci use x_shape; it equals y_shape here,
        # but y_shape would state the intent more clearly — confirm.
        frozen_cq = get_random_uniform(1000, x_shape).type(SecretConfig.dtypeForCpuOp)
        frozen_ci = get_random_uniform(1000, x_shape).type(SecretConfig.dtypeForCpuOp)
        self.set_cpu("CQ", frozen_cq.clone().detach())
        self.set_cpu("Ci", frozen_ci.clone().detach())
        self.transfer_cpu_to_enclave("CQ")
        self.transfer_cpu_to_enclave("Ci")

        # Run the fused reconstruction in the enclave and pull the result back.
        with NamedTimerInstance("Fused Recon"):
            self.fused_recon("Cf", "CQ", "Ci", "Af", "Bf")
        self.transfer_enclave_to_cpu("Cf")

        # CPU reference: modular sum of the shares, then dequantize.
        ideal_cq = mod_move_down(frozen_cq + frozen_ci)
        ideal_cf = dequantize_op(NamedParam(layer_name_op+"Z", ideal_cq), layer_name_op)
        compare_expected_actual(ideal_cf, self.get_cpu("Cf"), get_relative=False, verbose=True)
# Module-level driver: build one tester, initialize enclave-backed state,
# and run the currently selected scenario.  The commented-out calls are the
# other scenarios this script can exercise; enable them as needed.
Tester = EnclaveInterfaceTester()
Tester.init_test()
# Tester.test_plain_compute()
# Tester.test_tensor()
# Tester.test_quantize_only()
# Tester.test_quantize_dequantize()
# Tester.test_quantize_plainconv2d()
# Tester.test_quantize_plain_conv2d_backward_weight()
# Tester.test_async_test()
# Tester.test_async_task()
Tester.marshal_fused_share()
# Tester.test_fused_recon()
| 40.447867 | 113 | 0.62593 |
769859119bdc741f843ae4af0e7a80cb38a9c0e9 | 1,067 | py | Python | Notebooks/SentinelUtilities/SentinelAzure/azure_loganalytics_helper.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | [
"MIT"
] | 266 | 2019-10-18T00:41:39.000Z | 2022-03-18T05:44:01.000Z | Notebooks/SentinelUtilities/SentinelAzure/azure_loganalytics_helper.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | [
"MIT"
] | 113 | 2020-03-10T16:56:10.000Z | 2022-03-28T21:54:26.000Z | Notebooks/SentinelUtilities/SentinelAzure/azure_loganalytics_helper.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | [
"MIT"
] | 93 | 2020-01-07T20:28:43.000Z | 2022-03-23T04:09:39.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Azure LogAnalytics Helper:
This module provides helper methods to initialize and
manipulate LogAnalyticsManagementClient object.
Workspace is the focal point.
"""
class LogAnalyticsHelper():
    """Convenience wrapper around a LogAnalyticsManagementClient.

    All lookups are workspace-centric and delegate to the raw client's
    ``workspaces.list()`` call.
    """

    def __init__(self, la_client):
        self.la_client = la_client

    def get_workspace_name_list(self):
        """Return every workspace name, alphabetically sorted."""
        names = [workspace.name for workspace in self.la_client.workspaces.list()]
        names.sort()
        return names

    def get_workspace_id(self, workspace_name):
        """Return the customer id of the workspace called ``workspace_name``."""
        matches = (ws for ws in self.la_client.workspaces.list()
                   if ws.name == workspace_name)
        # next() raises StopIteration if no workspace matches, matching the
        # original behavior.
        return next(matches).customer_id
# end of the class
| 36.793103 | 100 | 0.614808 |
1f157a1a1574e6c798f2785076b4810fb58e20ec | 9,147 | py | Python | fastai/layers.py | joel-odlund/fastai | a76d611932eb68c8974e50be943f1becaf60fe62 | [
"Apache-2.0"
] | null | null | null | fastai/layers.py | joel-odlund/fastai | a76d611932eb68c8974e50be943f1becaf60fe62 | [
"Apache-2.0"
] | null | null | null | fastai/layers.py | joel-odlund/fastai | a76d611932eb68c8974e50be943f1becaf60fe62 | [
"Apache-2.0"
] | null | null | null | "`fastai.layers` provides essential functions to building and modifying `model` architectures"
from .torch_core import *
__all__ = ['AdaptiveConcatPool2d', 'MSELossFlat', 'CrossEntropyFlat', 'Debugger', 'Flatten', 'Lambda', 'PoolFlatten', 'ResizeBatch',
'bn_drop_lin', 'conv2d', 'conv2d_trans', 'conv_layer', 'embedding', 'simple_cnn', 'NormType', 'relu',
'batchnorm_2d', 'std_upsample_head', 'trunc_normal_', 'PixelShuffle_ICNR', 'icnr', 'NoopLoss', 'WassersteinLoss']
class Lambda(nn.Module):
    "Wrap an arbitrary `func` so it can be dropped into a model as a layer."

    def __init__(self, func:LambdaFunc):
        "Store `func`; the layer simply applies it to its input."
        super().__init__()
        self.func = func

    def forward(self, x):
        return self.func(x)
def ResizeBatch(*size:int) -> Tensor:
    "Layer that reshapes each batch element of `x` to `size`; useful between mismatched layers."
    target_shape = (-1,) + size
    return Lambda(lambda x: x.view(target_shape))
def Flatten()->Tensor:
    "Layer that collapses everything after the batch dimension of `x`."
    def _flatten(x):
        return x.view(x.size(0), -1)
    return Lambda(_flatten)
def PoolFlatten()->nn.Sequential:
    "Adaptive-average-pool `x` to 1x1, then flatten the result."
    pool = nn.AdaptiveAvgPool2d(1)
    return nn.Sequential(pool, Flatten())
# Normalization flavors understood by `conv_layer`/`batchnorm_2d`:
# Batch/BatchZero use nn.BatchNorm2d (BatchZero initializes the scale to 0),
# while Weight/Spectral wrap the conv in weight_norm/spectral_norm instead.
NormType = Enum('NormType', 'Batch BatchZero Weight Spectral')
def batchnorm_2d(nf:int, norm_type:NormType=NormType.Batch):
    "Create an `nn.BatchNorm2d` over `nf` features; scale starts at 0 for `NormType.BatchZero`."
    layer = nn.BatchNorm2d(nf)
    initial_weight = 0. if norm_type == NormType.BatchZero else 1.
    with torch.no_grad():
        layer.bias.fill_(1e-3)
        layer.weight.fill_(initial_weight)
    return layer
def bn_drop_lin(n_in:int, n_out:int, bn:bool=True, p:float=0., actn:Optional[nn.Module]=None):
    "Sequence of batchnorm (if `bn`), dropout (with `p`) and linear (`n_in`,`n_out`) layers followed by `actn`."
    layers = []
    if bn:
        layers.append(nn.BatchNorm1d(n_in))
    if p != 0:
        layers.append(nn.Dropout(p))
    layers.append(nn.Linear(n_in, n_out))
    if actn is not None:
        layers.append(actn)
    return layers
def conv1d(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
    "Create and initialize an `nn.Conv1d` layer, wrapped in spectral normalization."
    conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
    # Kaiming-init the raw weights before spectral_norm reparameterizes them.
    nn.init.kaiming_normal_(conv.weight)
    if bias: conv.bias.data.zero_()
    return spectral_norm(conv)
class SelfAttention(nn.Module):
    "Self attention layer for 2d."
    def __init__(self, n_channels:int):
        super().__init__()
        # Query/key are projected to n_channels//8 (per SAGAN); value keeps
        # the full channel count.  All three are spectral-normalized 1x1 convs.
        self.query = conv1d(n_channels, n_channels//8)
        self.key = conv1d(n_channels, n_channels//8)
        self.value = conv1d(n_channels, n_channels)
        # gamma starts at 0, so the layer is initially an identity mapping.
        self.gamma = nn.Parameter(tensor([0.]))

    def forward(self, x):
        #Notations from https://arxiv.org/pdf/1805.08318.pdf
        size = x.size()
        # Flatten the spatial dims: (B, C, H, W) -> (B, C, H*W).
        x = x.view(*size[:2],-1)
        f,g,h = self.query(x),self.key(x),self.value(x)
        # Attention map beta over spatial positions, then the weighted sum of
        # values is blended into the input via the learned gamma.
        beta = F.softmax(torch.bmm(f.permute(0,2,1).contiguous(), g), dim=1)
        o = self.gamma * torch.bmm(h, beta) + x
        return o.view(*size).contiguous()
def conv2d(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias=False, init:LayerFunc=nn.init.kaiming_normal_) -> nn.Conv2d:
    "Create and initialize `nn.Conv2d` layer. `padding` defaults to `ks//2`."
    if padding is None:
        padding = ks // 2
    conv = nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias)
    return init_default(conv, init)
def conv2d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0, bias=False) -> nn.ConvTranspose2d:
    "Create `nn.ConvTranspose2d` layer."
    kwargs = dict(kernel_size=ks, stride=stride, padding=padding, bias=bias)
    return nn.ConvTranspose2d(ni, nf, **kwargs)
def relu(inplace:bool=False, leaky:float=None):
    "Return an `nn.ReLU`, or an `nn.LeakyReLU` with slope `leaky` when given."
    if leaky is None:
        return nn.ReLU(inplace=inplace)
    return nn.LeakyReLU(inplace=inplace, negative_slope=leaky)
def conv_layer(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias:bool=None,
               norm_type:Optional[NormType]=NormType.Batch, use_activ:bool=True, leaky:float=None,
               transpose:bool=False, init:Callable=nn.init.kaiming_normal_, self_attention:bool=False):
    "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers."
    # Default padding keeps spatial size for odd kernels; transposed convs get 0.
    if padding is None: padding = (ks-1)//2 if not transpose else 0
    bn = norm_type in (NormType.Batch, NormType.BatchZero)
    # A conv followed by batchnorm does not need its own bias term.
    if bias is None: bias = not bn
    conv_func = nn.ConvTranspose2d if transpose else nn.Conv2d
    conv = init_default(conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding), init)
    # Weight/Spectral norm_types reparameterize the conv instead of adding a BN layer.
    if norm_type==NormType.Weight: conv = weight_norm(conv)
    elif norm_type==NormType.Spectral: conv = spectral_norm(conv)
    layers = [conv]
    if use_activ: layers.append(relu(True, leaky=leaky))
    if bn: layers.append(nn.BatchNorm2d(nf))
    #layers.append(batchnorm_2d(nf, norm_type=norm_type))
    if self_attention: layers.append(SelfAttention(nf))
    return nn.Sequential(*layers)
class SequentialResBlock(nn.Module):
    "Residual block: adds the input to the output of an `nn.Sequential` of `layers`."

    def __init__(self, *layers):
        super().__init__()
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.layers(x)
        return x + residual
class AdaptiveConcatPool2d(nn.Module):
    "Layer that concatenates the outputs of `AdaptiveMaxPool2d` and `AdaptiveAvgPool2d`."

    def __init__(self, sz:Optional[int]=None):
        "Each pool emits `sz`x`sz` maps (1x1 when `sz` is None), so channels double."
        super().__init__()
        target = sz or 1
        self.ap = nn.AdaptiveAvgPool2d(target)
        self.mp = nn.AdaptiveMaxPool2d(target)

    def forward(self, x):
        # Max-pooled features come first, matching the original ordering.
        return torch.cat([self.mp(x), self.ap(x)], 1)
class Debugger(nn.Module):
    "A module to debug inside a model."
    def forward(self,x:Tensor) -> Tensor:
        # Drop into the debugger (`set_trace` presumably comes from the
        # `torch_core` star import — confirm), then pass `x` through unchanged.
        set_trace()
        return x
def std_upsample_head(c, *nfs:Collection[int]) -> nn.Module:
    "Create a sequence of upsample layers through successive `nfs` sizes, ending in `c` channels."
    # Generalized from a hard-coded `range(4)`: build one transposed conv block
    # per consecutive pair in `nfs`, so any number of stages works (behavior is
    # unchanged for the original 5-element case).
    return nn.Sequential(
        nn.ReLU(),
        *(conv_layer(ni, nf, ks=2, stride=2, padding=0, transpose=True)
          for ni, nf in zip(nfs[:-1], nfs[1:])),
        conv2d_trans(nfs[-1], c)
    )
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
    "ICNR init."
    # Initialize a kernel for ni/scale^2 output channels, then replicate it
    # across each group of scale^2 sub-pixel channels so that the subsequent
    # PixelShuffle starts out free of checkerboard artifacts.  `x` is expected
    # to be a conv weight of shape (ni, nf, h, w); modified in place.
    ni,nf,h,w = x.shape
    ni2 = int(ni/(scale**2))
    k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
    k = k.contiguous().view(ni2, nf, -1)
    # Repeat each base kernel scale^2 times along the last axis.
    k = k.repeat(1, 1, scale**2)
    k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
    x.data.copy_(k)
class PixelShuffle_ICNR(nn.Module):
    "Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`, `icnr` init, and `weight_norm`."
    def __init__(self, ni:int, nf:int=None, scale:int=2, blur:bool=False, norm_type=NormType.Weight, leaky:float=None):
        super().__init__()
        nf = ifnone(nf, ni)
        # 1x1 conv expands channels by scale^2 so PixelShuffle can trade them
        # for spatial resolution; its weights get the ICNR init.
        self.conv = conv_layer(ni, nf*(scale**2), ks=1, norm_type=norm_type, use_activ=False)
        icnr(self.conv[0].weight)
        self.shuf = nn.PixelShuffle(scale)
        # Blurring over (h*w) kernel
        # "Super-Resolution using Convolutional Neural Networks without Any Checkerboard Artifacts"
        # - https://arxiv.org/abs/1806.02658
        self.pad = nn.ReplicationPad2d((1,0,1,0))
        self.blur = nn.AvgPool2d(2, stride=1)
        self.relu = relu(True, leaky=leaky)

    def forward(self,x):
        # conv -> relu -> shuffle; the optional pad+avgpool blur smooths the
        # shuffled output (only applied when `blur` was requested).
        x = self.shuf(self.relu(self.conv(x)))
        return self.blur(self.pad(x)) if self.blur else x
class CrossEntropyFlat(nn.CrossEntropyLoss):
    "Same as `nn.CrossEntropyLoss`, but flattens input and target."

    def forward(self, input:Tensor, target:Tensor) -> Rank0Tensor:
        batch, channels = input.shape[0], input.shape[1]
        flat_input = input.view(batch, channels, -1)
        flat_target = target.view(batch, -1)
        return super().forward(flat_input, flat_target)
class MSELossFlat(nn.MSELoss):
    "Same as `nn.MSELoss`, but flattens input and target."

    def forward(self, input:Tensor, target:Tensor) -> Rank0Tensor:
        flat_input, flat_target = input.view(-1), target.view(-1)
        return super().forward(flat_input, flat_target)
class NoopLoss(nn.Module):
    "Loss that ignores `target` and just returns the mean of `output`."

    def forward(self, output, target):
        return output.mean()
class WassersteinLoss(nn.Module):
    "Critic loss for WGAN: difference of the first elements of `real` and `fake`."

    def forward(self, real, fake):
        return real[0] - fake[0]
def simple_cnn(actns:Collection[int], kernel_szs:Collection[int]=None,
               strides:Collection[int]=None, bn=False) -> nn.Sequential:
    "CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`."
    # One conv layer per consecutive pair in `actns`; kernel size and stride
    # default to 3 and 2 for every layer.
    nl = len(actns)-1
    kernel_szs = ifnone(kernel_szs, [3]*nl)
    strides = ifnone(strides , [2]*nl)
    # The final conv layer skips batchnorm even when `bn` is set.
    layers = [conv_layer(actns[i], actns[i+1], kernel_szs[i], stride=strides[i],
              norm_type=(NormType.Batch if bn and i<(len(strides)-1) else None)) for i in range_of(strides)]
    layers.append(PoolFlatten())
    return nn.Sequential(*layers)
def trunc_normal_(x:Tensor, mean:float=0., std:float=1.) -> Tensor:
    "Truncated normal initialization (in place)."
    # Sample a standard normal, fold it into (-2, 2) via fmod, then rescale.
    # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
    x.normal_()
    x.fmod_(2)
    return x.mul_(std).add_(mean)
def embedding(ni:int,nf:int) -> nn.Module:
    "Create an `nn.Embedding` of `ni` entries with `nf` features, truncated-normal initialized."
    emb = nn.Embedding(ni, nf)
    with torch.no_grad():
        # Small-std truncated normal init, per https://arxiv.org/abs/1711.09160
        trunc_normal_(emb.weight, std=0.01)
    return emb
| 45.282178 | 134 | 0.66776 |
dae8c963c62e79b26738992540c20882887a2399 | 3,876 | py | Python | africastalking/SMS.py | RawPlutonium/africastalking-python | 753e8974913ed7707f6d8a7c6520ecaa96cc25d9 | [
"MIT"
] | 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | africastalking/SMS.py | RawPlutonium/africastalking-python | 753e8974913ed7707f6d8a7c6520ecaa96cc25d9 | [
"MIT"
] | 8 | 2019-08-02T08:06:18.000Z | 2022-03-11T23:45:17.000Z | africastalking/SMS.py | RawPlutonium/africastalking-python | 753e8974913ed7707f6d8a7c6520ecaa96cc25d9 | [
"MIT"
] | 11 | 2019-07-31T16:23:36.000Z | 2022-01-29T08:30:07.000Z | from . Service import APIService, validate_phone
class SMSService(APIService):
    """Africa's Talking SMS API wrapper: bulk and premium sending, inbox
    fetching, and premium-subscription management."""

    def __init__(self, username, api_key):
        super(SMSService, self).__init__(username, api_key)

    def _init_service(self):
        super(SMSService, self)._init_service()
        # SMS endpoints live under /version1 of the base API URL.
        self._baseUrl = self._baseUrl + '/version1'

    @staticmethod
    def _validate_recipients(recipients):
        """Raise ValueError naming the first invalid phone number, if any."""
        for phone in recipients:
            if not validate_phone(phone):
                raise ValueError('Invalid phone number: ' + phone)

    def send(self, message, recipients, sender_id=None, enqueue=False, callback=None):
        """Send a bulk SMS `message` to every number in `recipients`.

        `sender_id` sets the "from" field; `enqueue` asks the API to queue
        delivery.  `callback` is invoked with the response when given.
        """
        self._validate_recipients(recipients)
        url = self._make_url('/messaging')
        data = {
            'username': self._username,
            'to': ','.join(recipients),
            'message': message,
            'bulkSMSMode': 1,
        }
        if sender_id is not None:
            data['from'] = sender_id
        if enqueue:
            data['enqueue'] = 1
        return self._make_request(url, 'POST', headers=self._headers, params=None, data=data, callback=callback)

    def send_premium(self, message, keyword, link_id, recipients, sender_id=None,
                     retry_duration_in_hours=None, callback=None):
        """Send a premium-rate SMS (bulkSMSMode=0) tied to `keyword`/`link_id`."""
        self._validate_recipients(recipients)
        url = self._make_url('/messaging')
        data = {
            'username': self._username,
            'to': ','.join(recipients),
            'message': message,
            'bulkSMSMode': 0,
            'keyword': keyword,
            'linkId': link_id
        }
        if sender_id is not None:
            data['from'] = sender_id
        if retry_duration_in_hours is not None:
            data['retryDurationInHours'] = retry_duration_in_hours
        return self._make_request(url, 'POST', headers=self._headers, params=None, data=data, callback=callback)

    def fetch_messages(self, last_received_id=None, callback=None):
        """Fetch inbound messages, optionally only those after `last_received_id`."""
        url = self._make_url('/messaging')
        params = {
            'username': self._username
        }
        if last_received_id is not None:
            params['lastReceivedId'] = last_received_id
        return self._make_request(url, 'GET', headers=self._headers, params=params, data=None, callback=callback)

    def fetch_subscriptions(self, short_code, keyword, last_received_id=None, callback=None):
        """List premium subscriptions for `short_code`/`keyword`."""
        url = self._make_url('/subscription')
        params = {
            'username': self._username,
            'shortCode': short_code,
            'keyword': keyword
        }
        if last_received_id is not None:
            params['lastReceivedId'] = last_received_id
        return self._make_request(url, 'GET', headers=self._headers, params=params, data=None, callback=callback)

    def create_subscription(self, short_code, keyword, phone_number, checkout_token, callback=None):
        """Subscribe `phone_number` to the product at `short_code`/`keyword`."""
        # Consistency: reuse the shared validator so the error message names
        # the offending number, like send()/send_premium() do.
        self._validate_recipients([phone_number])
        url = self._make_url('/subscription/create')
        data = {
            'username': self._username,
            'shortCode': short_code,
            'keyword': keyword,
            'phoneNumber': phone_number,
            'checkoutToken': checkout_token,
        }
        return self._make_request(url, 'POST', headers=self._headers, data=data, params=None, callback=callback)

    def delete_subscription(self, short_code, keyword, phone_number, callback=None):
        """Remove `phone_number`'s subscription at `short_code`/`keyword`."""
        self._validate_recipients([phone_number])
        url = self._make_url('/subscription/delete')
        data = {
            'username': self._username,
            'shortCode': short_code,
            'keyword': keyword,
            'phoneNumber': phone_number
        }
        return self._make_request(url, 'POST', headers=self._headers, data=data, params=None, callback=callback)
| 34 | 113 | 0.610681 |
9f75c48ba36b6a7803bd6b088fc90ac06de47dd8 | 1,247 | py | Python | jitlog/constants.py | NyanKiyoshi/vmprof-python | 8e58464956e1553ebafacb0afc5de9dd08c197e3 | [
"MIT"
] | 430 | 2015-01-31T13:41:07.000Z | 2022-01-24T02:04:23.000Z | jitlog/constants.py | NyanKiyoshi/vmprof-python | 8e58464956e1553ebafacb0afc5de9dd08c197e3 | [
"MIT"
] | 202 | 2015-02-06T19:01:38.000Z | 2022-03-22T15:15:20.000Z | jitlog/constants.py | NyanKiyoshi/vmprof-python | 8e58464956e1553ebafacb0afc5de9dd08c197e3 | [
"MIT"
] | 59 | 2015-02-08T16:06:28.000Z | 2022-01-11T00:12:37.000Z | # generated constants from rpython/rlib/jitlog.py
import struct
# Each MARK_* constant is a single signed byte (struct format "b") used as a
# record tag in the binary jitlog stream.
MARK_JITLOG_START = struct.pack("b", 0x10)
MARK_INPUT_ARGS = struct.pack("b", 0x11)
MARK_RESOP_META = struct.pack("b", 0x12)
MARK_RESOP = struct.pack("b", 0x13)
MARK_RESOP_DESCR = struct.pack("b", 0x14)
MARK_ASM_ADDR = struct.pack("b", 0x15)
MARK_ASM = struct.pack("b", 0x16)
MARK_TRACE = struct.pack("b", 0x17)
MARK_TRACE_OPT = struct.pack("b", 0x18)
MARK_TRACE_ASM = struct.pack("b", 0x19)
MARK_STITCH_BRIDGE = struct.pack("b", 0x1a)
MARK_START_TRACE = struct.pack("b", 0x1b)
MARK_JITLOG_COUNTER = struct.pack("b", 0x1c)
MARK_INIT_MERGE_POINT = struct.pack("b", 0x1d)
MARK_JITLOG_HEADER = struct.pack("b", 0x1e)
MARK_MERGE_POINT = struct.pack("b", 0x1f)
MARK_COMMON_PREFIX = struct.pack("b", 0x20)
MARK_ABORT_TRACE = struct.pack("b", 0x21)
MARK_SOURCE_CODE = struct.pack("b", 0x22)
MARK_REDIRECT_ASSEMBLER = struct.pack("b", 0x23)
MARK_TMP_CALLBACK = struct.pack("b", 0x24)
MARK_JITLOG_END = struct.pack("b", 0x25)
# Merge-point field descriptors: (semantic-type bit, value encoding), where
# "s" marks string payloads and "i" integer payloads (cf. MP_STR / MP_INT).
MP_INDEX = (0x4,"i")
MP_SCOPE = (0x8,"s")
MP_FILENAME = (0x1,"s")
MP_OPCODE = (0x10,"s")
MP_LINENO = (0x2,"i")
MP_STR = (0x0,"s")
MP_INT = (0x0,"i")
# Human-readable names for the semantic-type bits above.
SEM_TYPE_NAMES = {
    0x4: "index",
    0x8: "scope",
    0x1: "filename",
    0x10: "opcode",
    0x2: "lineno",
}
| 31.974359 | 49 | 0.700882 |
11d9c0ffdd5848652dc7d5ab81ae7d83f66c3c57 | 4,021 | py | Python | tests/test_django_configurations.py | cornershop/pytest-django | 6c51fe31a9cc31a0816785dc4f40114dff56da7b | [
"BSD-3-Clause"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | tests/test_django_configurations.py | cornershop/pytest-django | 6c51fe31a9cc31a0816785dc4f40114dff56da7b | [
"BSD-3-Clause"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | tests/test_django_configurations.py | cornershop/pytest-django | 6c51fe31a9cc31a0816785dc4f40114dff56da7b | [
"BSD-3-Clause"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """Tests which check the various ways you can set DJANGO_SETTINGS_MODULE
If these tests fail you probably forgot to install django-configurations.
"""
import pytest
pytest.importorskip("configurations")
BARE_SETTINGS = """
from configurations import Configuration
class MySettings(Configuration):
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
"""
def test_dc_env(testdir, monkeypatch):
    """Settings module and configuration class are picked up from the
    DJANGO_SETTINGS_MODULE / DJANGO_CONFIGURATION environment variables and
    reported as "(from env)" in pytest-django's header line."""
    monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "tpkg.settings_env")
    monkeypatch.setenv("DJANGO_CONFIGURATION", "MySettings")
    pkg = testdir.mkpydir("tpkg")
    settings = pkg.join("settings_env.py")
    settings.write(BARE_SETTINGS)
    testdir.makepyfile(
        """
        import os

        def test_settings():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
            assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
        """
    )

    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines([
        'django: settings: tpkg.settings_env (from env), configuration: MySettings (from env)',
        "* 1 passed in*",
    ])
    assert result.ret == 0
def test_dc_env_overrides_ini(testdir, monkeypatch):
    """Environment variables take precedence over DJANGO_SETTINGS_MODULE /
    DJANGO_CONFIGURATION values declared in the ini file."""
    monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "tpkg.settings_env")
    monkeypatch.setenv("DJANGO_CONFIGURATION", "MySettings")
    # These ini values must lose against the env vars set above.
    testdir.makeini(
        """
       [pytest]
       DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
       DJANGO_CONFIGURATION = DO_NOT_USE_ini
    """
    )
    pkg = testdir.mkpydir("tpkg")
    settings = pkg.join("settings_env.py")
    settings.write(BARE_SETTINGS)
    testdir.makepyfile(
        """
        import os

        def test_ds():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
            assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
        """
    )

    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines([
        'django: settings: tpkg.settings_env (from env), configuration: MySettings (from env)',
        "* 1 passed in*",
    ])
    assert result.ret == 0
def test_dc_ini(testdir, monkeypatch):
    """With no environment variable set, both values are read from the ini
    file and reported as "(from ini)"."""
    # Remove the env var so the ini-file value is the only candidate.
    monkeypatch.delenv("DJANGO_SETTINGS_MODULE")
    testdir.makeini(
        """
       [pytest]
       DJANGO_SETTINGS_MODULE = tpkg.settings_ini
       DJANGO_CONFIGURATION = MySettings
    """
    )
    pkg = testdir.mkpydir("tpkg")
    settings = pkg.join("settings_ini.py")
    settings.write(BARE_SETTINGS)
    testdir.makepyfile(
        """
        import os

        def test_ds():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_ini'
            assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
        """
    )

    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines([
        'django: settings: tpkg.settings_ini (from ini), configuration: MySettings (from ini)',
        "* 1 passed in*",
    ])
    assert result.ret == 0
def test_dc_option(testdir, monkeypatch):
    """The --ds/--dc command-line options override both environment variables
    and ini-file values, and are reported as "(from option)"."""
    # Env vars and ini values are both poisoned; only the options may win.
    monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "DO_NOT_USE_env")
    monkeypatch.setenv("DJANGO_CONFIGURATION", "DO_NOT_USE_env")
    testdir.makeini(
        """
       [pytest]
       DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
       DJANGO_CONFIGURATION = DO_NOT_USE_ini
    """
    )
    pkg = testdir.mkpydir("tpkg")
    settings = pkg.join("settings_opt.py")
    settings.write(BARE_SETTINGS)
    testdir.makepyfile(
        """
        import os

        def test_ds():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
            assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
        """
    )

    result = testdir.runpytest_subprocess("--ds=tpkg.settings_opt", "--dc=MySettings")
    result.stdout.fnmatch_lines([
        'django: settings: tpkg.settings_opt (from option),'
        ' configuration: MySettings (from option)',
        "* 1 passed in*",
    ])
    assert result.ret == 0
| 28.51773 | 95 | 0.643372 |
88efe309d60ec4abb9c8fb37ab4b3d1729f7483e | 1,775 | py | Python | browserexport/model.py | seanbreckenridge/browserexport | 24166a6704f6ac65d66383b1dd73ea7096859e32 | [
"MIT"
] | 32 | 2021-05-17T18:24:15.000Z | 2022-03-29T20:14:51.000Z | browserexport/model.py | seanbreckenridge/ffexport | 24166a6704f6ac65d66383b1dd73ea7096859e32 | [
"MIT"
] | 17 | 2020-08-31T23:35:55.000Z | 2021-04-18T21:56:21.000Z | browserexport/model.py | seanbreckenridge/ffexport | 24166a6704f6ac65d66383b1dd73ea7096859e32 | [
"MIT"
] | 1 | 2022-02-11T23:35:16.000Z | 2022-02-11T23:35:16.000Z | """
A namedtuple representaton for the extracted info
"""
from datetime import datetime
from typing import Optional, NamedTuple, Dict, Any
Second = int
# typically isn't used complete by one browser, inludes
# partial information from browsers which supply the information
class Metadata(NamedTuple):
title: Optional[str] = None
description: Optional[str] = None
preview_image: Optional[str] = None
duration: Optional[Second] = None
@classmethod
def make(
cls,
title: Optional[str] = None,
description: Optional[str] = None,
preview_image: Optional[str] = None,
duration: Optional[Second] = None,
) -> Optional["Metadata"]:
"""
Alternate constructor; only make the Metadata object if the user
supplies at least one piece of data
"""
if (title or description or preview_image or duration) is None:
return None
return cls(
title=title,
description=description,
preview_image=preview_image,
duration=duration,
)
def test_make_metadata() -> None:
assert Metadata.make(None, None, None, None) is None
assert Metadata.make(title="webpage title", duration=5) is not None
class Visit(NamedTuple):
url: str
dt: datetime
# hmm, does this being optional make it more annoying to consume
# by other programs? reduces the amount of data that other programs
# need to consume, so theres a tradeoff...
metadata: Optional[Metadata] = None
def serialize(self) -> Dict[str, Any]:
return {
"url": self.url,
"dt": self.dt.timestamp(),
"metadata": self.metadata._asdict() if self.metadata is not None else None,
}
| 29.583333 | 87 | 0.64169 |
3b564517d69b4d53752bbf05e6e3ac9936e46cad | 15,796 | py | Python | distribute_setup.py | jasuca/pygrowup | 18a6e916ff1e416bbd68afd125fb982ce87091ca | [
"BSD-3-Clause"
] | null | null | null | distribute_setup.py | jasuca/pygrowup | 18a6e916ff1e416bbd68afd125fb982ce87091ca | [
"BSD-3-Clause"
] | null | null | null | distribute_setup.py | jasuca/pygrowup | 18a6e916ff1e416bbd68afd125fb982ce87091ca | [
"BSD-3-Clause"
] | 1 | 2021-12-28T13:19:52.000Z | 2021-12-28T13:19:52.000Z | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
from __future__ import absolute_import
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.24"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack `tarball` into a temporary directory and run its
    ``setup.py install`` from there, restoring the cwd afterwards."""
    import shutil  # local import keeps the module-level import block untouched
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        os.chdir(old_wd)
        # Fix: the extracted tree used to be left behind in the temp dir.
        shutil.rmtree(tmpdir, ignore_errors=True)
def _build_egg(egg, tarball, to_dir):
    """Unpack `tarball`, build a bdist_egg into `to_dir`, and verify that
    the expected `egg` file exists afterwards (IOError otherwise)."""
    import shutil  # local import keeps the module-level import block untouched
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        # Fix: the extracted tree used to be left behind in the temp dir.
        shutil.rmtree(tmpdir, ignore_errors=True)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a distribute egg for this Python exists in `to_dir` (downloading
    and building it if needed), then make it importable."""
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # Put the egg first on sys.path and remember it so setuptools can
    # bootstrap-install itself from this file later.
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure distribute >= `version` is importable, downloading it if needed.

    Plain setuptools (no `_distribute` marker) triggers a download; an
    already-imported, too-old distribute is a fatal conflict because it
    cannot be replaced mid-run.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # `_distribute` only exists on distribute's pkg_resources; plain
            # setuptools falls through to the download path via ImportError.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap implementations under a live import.
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 / Python 2 import compatibility for urlopen.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator: run `function` with setuptools' DirectorySandbox violation
    check temporarily disabled, restoring it afterwards.

    Needed because the wrapped helpers write outside the sandboxed build
    directory; a missing setuptools simply means no sandbox to bypass.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            # `_old` doubles as a re-entrancy marker: only patch once.
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            # Undo the patch only if this call installed it.
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
def _patch_file(path, content):
    """Back up the file at `path` (timestamped rename) and rewrite it with
    `content`.  Returns False when the file already holds `content`."""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

# Writing outside the build dir requires the sandbox bypass.
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
    """Move `path` aside to a timestamped ``.OLD`` name and return that name."""
    backup = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, backup)
    os.rename(path, backup)
    return backup
def _remove_flat_installation(placeholder):
    """Patch a flat (non-egg) setuptools install at *placeholder*.

    Stamps the setuptools egg-info with the fake Distribute metadata and
    moves the setuptools modules out of the way.  Returns True on
    success, False when there is nothing to do or it was already patched.
    """
    if not os.path.isdir(placeholder):
        # typo fixed: message used to read 'Unkown installation'
        log.warn('Unknown installation at %s', placeholder)
        return False
    found = False
    # `entry` instead of `file`: avoid shadowing the builtin.
    entry = None
    for entry in os.listdir(placeholder):
        if fnmatch.fnmatch(entry, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        # was a bare `return` (None); callers only test truthiness, so
        # returning False keeps the API consistent with the other exits
        return False
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, entry)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: plant fake setuptools metadata in the
    distribution's purelib directory."""
    log.warn('After install bootstrap.')
    install_cmd = dist.get_command_obj('install')
    _create_fake_setuptools_pkg_info(install_cmd.install_purelib)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake setuptools egg-info file plus a ``setuptools.pth``
    entry under *placeholder*, so pkg_resources believes a (faked)
    setuptools version is installed.  No-op if *placeholder* is missing
    or the egg-info already exists.
    """
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
            (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    # context managers close the files even if a write fails
    # (the old try/finally pairs did the same, more verbosely)
    with open(pkg_info, 'w') as f:
        f.write(SETUPTOOLS_PKG_INFO)
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    with open(pth_file, 'w') as f:
        f.write(os.path.join(os.curdir, setuptools_file))
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace the egg at *path* with a stub directory whose PKG-INFO
    carries the fake Distribute metadata.

    Returns False when the egg is already patched, True otherwise.
    """
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    _rename_path(path)  # keep the original egg as a timestamped backup
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    # context manager closes the file even if the write fails
    with open(pkg_info, 'w') as f:
        f.write(SETUPTOOLS_PKG_INFO)
    return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: neutralise any existing setuptools install
    before Distribute is put in place."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
    """Return True when *location* lies under the requested install root.

    Scans the ``install`` options in sys.argv (--root/--prefix/--user)
    and reports whether *location* starts with the chosen prefix, so the
    caller knows whether patching setuptools there makes sense.  Returns
    True when no install command or no prefix option is present.
    """
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install')+1:]
    for index, arg in enumerate(args):
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                # bugfix: split on the option that actually matched; the
                # old code split on the literal 'root=', which returned
                # the whole argument for --prefix=... and broke the test.
                top_dir = arg.split('%s=' % option, 1)[-1]
                return location.startswith(top_dir)
            elif arg == option:
                # bugfix: the old guard `len(args) > index` is always
                # true inside this loop, so an option given as the very
                # last argument raised IndexError on args[index+1].
                if len(args) > index + 1:
                    top_dir = args[index+1]
                    return location.startswith(top_dir)
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
    return True
def _fake_setuptools():
    """Locate an installed setuptools distribution and replace its
    metadata with the fake Distribute stamp, relaunching the process
    afterwards.  No-op when pkg_resources/setuptools is absent or the
    install lies outside the requested --root/--prefix."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    # Newer distribute accepts replacement=False; older APIs raise
    # TypeError, in which case fall back to the plain signature.
    try:
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if it's an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-execute the current setup invocation in a fresh interpreter and
    exit this process with the child's return code."""
    log.warn('Relaunching...')
    # pip invokes us as `python -c install --single-version-externally-managed`;
    # rewrite argv[0] so the relaunched process runs setup.py instead
    # (works around a pip relaunch bug).
    pip_marker = ['-c', 'install', '--single-version-externally-managed']
    if sys.argv[:3] == pip_marker:
        sys.argv[0] = 'setup.py'
    command = [sys.executable] + sys.argv
    sys.exit(subprocess.call(command))
# Backport of TarFile.extractall for old Pythons that lack it; it is
# bound onto the TarFile instance before use (hence the `self` argument).
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448 # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    # Deepest paths first, so permissions are fixed bottom-up and a
    # restrictive parent mode never blocks fixing its children.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # `argv` and `version` are accepted for API compatibility but are not
    # used in this body; download_setuptools() applies its own defaults.
    tarball = download_setuptools()
    _install(tarball)
if __name__ == '__main__':
    main(sys.argv[1:])
| 32.435318 | 80 | 0.619714 |
0b3430d61a7094503857360630edc741f602d028 | 24,641 | py | Python | examples/navier-stokes/single_instance/b1_stokes_ns_resmin_base.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 1 | 2021-12-02T06:42:38.000Z | 2021-12-02T06:42:38.000Z | examples/navier-stokes/single_instance/b1_stokes_ns_resmin_base.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | null | null | null | examples/navier-stokes/single_instance/b1_stokes_ns_resmin_base.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 2 | 2021-12-01T20:53:24.000Z | 2021-12-02T06:42:39.000Z | import os
import sys
import math
import json
import torch
import numpy as np
import scipy.io
from scipy import ndimage
import matplotlib
# from skimage import io
# matplotlib.use("pgf")
# Global matplotlib style: small font for the multi-panel contour figures.
matplotlib.rcParams.update({
    # 'font.family': 'serif',
    'font.size':8,
})
from matplotlib import pyplot as plt
import pytorch_lightning as pl
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import TensorBoardLogger
# Fix all RNG seeds so training runs are reproducible.
seed_everything(42)
import DiffNet
from DiffNet.DiffNetFEM import DiffNet2DFEM
from torch.utils import data
# from e1_stokes_base_resmin import Stokes2D
from pytorch_lightning.callbacks.base import Callback
# High print precision helps when eyeballing small residual values.
torch.set_printoptions(precision=10)
class OptimSwitchLBFGS(Callback):
    """Lightning callback that swaps the u/v optimizers from Adam to
    LBFGS once training reaches `epochs`; the pressure network stays on
    Adam (its LBFGS line is deliberately commented out below)."""
    def __init__(self, epochs=50):
        # Epoch index at which the optimizer switch happens.
        self.switch_epoch = epochs
        # Guard so the switch banner is printed at most once.
        self.print_declaration = False
    def on_epoch_start(self, trainer, pl_module):
        if trainer.current_epoch == self.switch_epoch:
            if not self.print_declaration:
                print("======================Switching to LBFGS after {} epochs ======================".format(self.switch_epoch))
                self.print_declaration = True
            # Rebuild the optimizer list in the same order training_step
            # indexes it: [net_u, net_v, net_p].
            opts = [torch.optim.LBFGS(pl_module.net_u.parameters(), lr=pl_module.learning_rate, max_iter=5),
                torch.optim.LBFGS(pl_module.net_v.parameters(), lr=pl_module.learning_rate, max_iter=5),
                # torch.optim.LBFGS(pl_module.net_p.parameters(), lr=pl_module.learning_rate, max_iter=5),
                torch.optim.Adam(pl_module.net_p.parameters(), lr=pl_module.learning_rate)
                ]
            trainer.optimizers = opts
class Stokes_NS_Dataset(data.Dataset):
    """Single-instance dataset for the Stokes / Navier-Stokes solver.

    Every item is the same sample: a 6-channel input array holding the
    nodal coordinates (x, y), three Dirichlet boundary masks (for u, v
    and p) and the viscosity field 1/Re, plus a forcing field that is a
    constant 1/Re everywhere.
    """

    def __init__(self, domain_lengths=(1., 1.), domain_sizes=(32, 32), Re=1):
        """Build the nodal grid and boundary masks once up front."""
        grid_x = np.linspace(0, domain_lengths[0], domain_sizes[0])
        grid_y = np.linspace(0, domain_lengths[1], domain_sizes[1])
        self.x, self.y = np.meshgrid(grid_x, grid_y)

        # Velocity Dirichlet masks: 1 on every domain edge, 0 inside.
        edge_mask = np.zeros_like(self.x)
        edge_mask[0, :] = 1.0
        edge_mask[-1, :] = 1.0
        edge_mask[:, 0] = 1.0
        edge_mask[:, -1] = 1.0
        self.bc1 = edge_mask           # u-velocity mask
        self.bc2 = edge_mask.copy()    # v-velocity mask (same edges)

        # Pressure pin: a single node at the origin corner.
        self.bc3 = np.zeros_like(self.x)
        self.bc3[0:1, 0:1] = 1.0

        self.Re = Re
        self.nu = np.ones_like(self.x) / self.Re
        self.n_samples = 100

    def __len__(self):
        """Fixed number of (identical) samples per epoch."""
        return self.n_samples

    def __getitem__(self, index):
        """Return the 6-channel inputs and the constant forcing field."""
        inputs = np.array([self.x, self.y, self.bc1, self.bc2, self.bc3, self.nu])
        forcing = np.ones_like(self.x) * (1 / self.Re)
        return torch.FloatTensor(inputs), torch.FloatTensor(forcing).unsqueeze(0)
class Stokes_NS_Base_2D(DiffNet2DFEM):
    """Residual-minimisation FEM base module for 2D Stokes/Navier-Stokes.

    `eq_type` selects between the plain Stokes residual and the
    VMS-stabilised Navier-Stokes residual; `mapping_type` selects whether
    net_u/net_v/net_p map the viscosity field to solution fields
    ('network') or are bare parameter tensors ('no_network').
    Subclasses are expected to override `exact_solution` and `forcing`.
    """
    def __init__(self, network, dataset, **kwargs):
        super(Stokes_NS_Base_2D, self).__init__(network[0], dataset, **kwargs)
        self.plot_frequency = kwargs.get('plot_frequency', 1)
        self.eq_type = kwargs.get('eq_type', 'ns')
        self.mapping_type = kwargs.get('mapping_type', 'no_network')
        print('plot_frequency = ', self.plot_frequency)
        print('eq_type = ', self.eq_type)
        print('mapping_type = ', self.mapping_type)
        # NOTE(review): several attributes used by the methods below
        # (net_u/net_v/net_p, viscosity, pspg_param, fx_gp/fy_gp,
        # u_bc/v_bc/p_bc, midline_X/Y/U/V, topline_P) are only created by
        # the commented-out code here; a subclass must set them or
        # forward()/loss()/plot_contours() will fail -- confirm.
        # self.net_u = network[0]
        # self.net_v = network[1]
        # self.net_p = network[2]
        # self.Re = self.dataset.Re
        # self.viscosity = 1. / self.Re
        # self.pspg_param = self.h**2 * self.Re / 12.
        # ue, ve, pe = self.exact_solution(self.dataset.x, self.dataset.y)
        # self.u_exact = torch.FloatTensor(ue)
        # self.v_exact = torch.FloatTensor(ve)
        # self.p_exact = torch.FloatTensor(pe)
        # fx_gp, fy_gp = self.forcing(self.xgp, self.ygp)
        # self.fx_gp = torch.FloatTensor(fx_gp)
        # self.fy_gp = torch.FloatTensor(fy_gp)
        # u_bc = np.zeros_like(self.dataset.x); u_bc[-1,:] = 1. - 16. * (self.dataset.x[-1,:]-0.5)**4
        # v_bc = np.zeros_like(self.dataset.x)
        # p_bc = np.zeros_like(self.dataset.x)
        # self.u_bc = torch.FloatTensor(u_bc)
        # self.v_bc = torch.FloatTensor(v_bc)
        # self.p_bc = torch.FloatTensor(p_bc)
        # numerical = np.loadtxt('ns-ldc-numerical-results/midline_cuts_Re100_regularized_128x128.txt', delimiter=",", skiprows=1)
        # self.midline_X = numerical[:,0]
        # self.midline_Y = numerical[:,0]
        # self.midline_U = numerical[:,1]
        # self.midline_V = numerical[:,2]
        # self.topline_P = numerical[:,3]
    def exact_solution(self, x, y):
        """Default (base-class) exact solution: all-zero fields."""
        print("exact_solution -- Base class called")
        u_exact = np.zeros_like(x)
        v_exact = np.zeros_like(x)
        p_exact = np.zeros_like(x)
        return u_exact, v_exact, p_exact
    def forcing(self, x, y):
        """Default (base-class) forcing: zero body force."""
        print("forcing -- Base class called")
        fx = np.zeros_like(x)
        fy = np.zeros_like(x)
        return fx, fy
    def calc_tau(self, h_tuple, adv_tuple, visco):
        '''
        Compute the VMS stabilization parameters (taum, tauc) at the
        Gauss points from element sizes, advection velocities and
        viscosity.

        values input to this function should be detached
        from the computation graph
        '''
        hx, hy = h_tuple
        u, v = adv_tuple
        g = torch.tensor([2./hx, 2./hy])
        G = torch.tensor([[4./hx**2, 0.], [0., 4./hy**2]])
        Cinv = 36.
        # assume regular grid
        adv_part = G[0,0] * u**2 + G[1,1] * v**2
        diffusion_part = Cinv* visco**2 * (G[0,0]**2 + G[1,1]**2)
        # calc taum at GP
        temp = torch.sqrt(adv_part + diffusion_part)
        taum = 1. / temp
        # calc tauc at GP
        gg_inv = 1. / (g[0]**2 + g[1]**2)
        tauc = temp * gg_inv
        return taum, tauc
    def Q1_vector_assembly(self, Aglobal, Aloc_all):
        """Scatter-add the four local (per-element-corner) residual
        contributions in Aloc_all onto the global nodal array Aglobal."""
        Aglobal[:,0, 0:-1, 0:-1] += Aloc_all[:,0, :, :]
        Aglobal[:,0, 0:-1, 1: ] += Aloc_all[:,1, :, :]
        Aglobal[:,0, 1: , 0:-1] += Aloc_all[:,2, :, :]
        Aglobal[:,0, 1: , 1: ] += Aloc_all[:,3, :, :]
        return Aglobal
    def calc_residuals_Stokes(self, pred, inputs_tensor, forcing_tensor):
        """Assemble the weak-form residuals (R1, R2, R3) of the Stokes
        equations (momentum-x, momentum-y, continuity with a PSPG term)
        for the predicted (u, v, p) fields in `pred`."""
        visco_scalar = self.viscosity
        visco = visco_scalar
        hx = self.hx
        hy = self.hy
        N_values = self.Nvalues.type_as(pred[0])
        dN_x_values = self.dN_x_values.type_as(pred[0])
        dN_y_values = self.dN_y_values.type_as(pred[0])
        gpw = self.gpw.type_as(pred[0])
        f1 = self.fx_gp.type_as(pred[0])
        f2 = self.fy_gp.type_as(pred[0])
        u_bc = self.u_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        v_bc = self.v_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        p_bc = self.p_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        f = forcing_tensor # renaming variable
        u_pred = pred[0] #[:,0:1,:,:]
        v_pred = pred[1] #[:,1:2,:,:]
        p_pred = pred[2] #[:,2:3,:,:]
        # extract diffusivity and boundary conditions here
        x = inputs_tensor[:,0:1,:,:]
        y = inputs_tensor[:,1:2,:,:]
        bc1 = inputs_tensor[:,2:3,:,:]
        bc2 = inputs_tensor[:,3:4,:,:]
        bc3 = inputs_tensor[:,4:5,:,:]
        nu = inputs_tensor[:,5:6,:,:]
        # DERIVE NECESSARY VALUES
        trnsfrm_jac = 1. #(0.5*hx)*(0.5*hy)
        JxW = (gpw*trnsfrm_jac).unsqueeze(-1).unsqueeze(-1).unsqueeze(0)
        # apply boundary conditions
        u_pred = torch.where(bc1>=0.5, u_bc, u_pred)
        v_pred = torch.where(bc2>=0.5, v_bc, v_pred)
        p_pred = torch.where(bc3>=0.5, p_bc, p_pred)
        # variable values at GP
        # visco = self.gauss_pt_evaluation(nu)
        u = self.gauss_pt_evaluation(u_pred)
        v = self.gauss_pt_evaluation(v_pred)
        p = self.gauss_pt_evaluation(p_pred)
        # 1st derivatives at GP
        p_x = self.gauss_pt_evaluation_der_x(p_pred)
        p_y = self.gauss_pt_evaluation_der_y(p_pred)
        u_x = self.gauss_pt_evaluation_der_x(u_pred)
        u_y = self.gauss_pt_evaluation_der_y(u_pred)
        v_x = self.gauss_pt_evaluation_der_x(v_pred)
        v_y = self.gauss_pt_evaluation_der_y(v_pred)
        # 2nd derivatives at GP
        # u_xx = self.gauss_pt_evaluation_der2_x(u_pred)
        # u_yy = self.gauss_pt_evaluation_der2_y(u_pred)
        # v_xx = self.gauss_pt_evaluation_der2_x(v_pred)
        # v_yy = self.gauss_pt_evaluation_der2_y(v_pred)
        # taum, tauc = self.calc_tau((hx,hy), (u.clone().detach(),v.clone().detach()), visco)
        # CALCULATION STARTS
        # lhs
        W_U1x = N_values*u_x
        W_U2y = N_values*v_y
        Wx_U1x = dN_x_values*u_x
        Wy_U1y = dN_y_values*u_y
        Wx_U2x = dN_x_values*v_x
        Wy_U2y = dN_y_values*v_y
        Wx_P = dN_x_values*p
        Wy_P = dN_y_values*p
        Wx_Px = dN_x_values*p_x
        Wy_Py = dN_y_values*p_y
        # rhs
        W_F1 = N_values*f1
        W_F2 = N_values*f2
        # integrated values on lhs & rhs
        temp1 = visco*(Wx_U1x+Wy_U1y) - Wx_P # - W_F1
        temp2 = visco*(Wx_U2x+Wy_U2y) - Wy_P # - W_F2
        temp3 = W_U1x+W_U2y + self.pspg_param*(Wx_Px+Wy_Py)
        # # integrated values on lhs & rhs
        # temp1 = W_Adv1 + visco*(Wx_U1x+Wy_U1y) - Wx_P - W_F1 + taum*C1_1 - taum*C2_1 - taum**2*Rey_1 + tauc*Wx_Div
        # temp2 = W_Adv2 + visco*(Wx_U2x+Wy_U2y) - Wy_P - W_F2 + taum*C1_2 - taum*C2_2 - taum**2*Rey_2 + tauc*Wy_Div
        # temp3 = W_Div + taum*(Wx_Res1 + Wy_Res2)
        # unassembled residual
        R_split_1 = torch.sum(temp1*JxW, 2) # sum across all GP
        R_split_2 = torch.sum(temp2*JxW, 2) # sum across all GP
        R_split_3 = torch.sum(temp3*JxW, 2) # sum across all GP
        # assembly
        R1 = torch.zeros_like(u_pred); R1 = self.Q1_vector_assembly(R1, R_split_1)
        R2 = torch.zeros_like(u_pred); R2 = self.Q1_vector_assembly(R2, R_split_2)
        R3 = torch.zeros_like(u_pred); R3 = self.Q1_vector_assembly(R3, R_split_3)
        # add boundary conditions to R <---- this step is very important
        R1 = torch.where(bc1>=0.5, u_bc, R1)
        R2 = torch.where(bc2>=0.5, v_bc, R2)
        R3 = torch.where(bc3>=0.5, p_bc, R3)
        return R1, R2, R3
    def calc_residuals_NS(self, pred, inputs_tensor, forcing_tensor):
        """Assemble the VMS-stabilised weak-form residuals (R1, R2, R3)
        of the incompressible Navier-Stokes equations, including
        cross-stress, Reynolds-stress and PSPG stabilisation terms."""
        visco_scalar = self.viscosity
        visco = visco_scalar
        hx = self.h
        hy = self.h
        N_values = self.Nvalues.type_as(pred[0])
        dN_x_values = self.dN_x_values.type_as(pred[0])
        dN_y_values = self.dN_y_values.type_as(pred[0])
        gpw = self.gpw.type_as(pred[0])
        f1 = self.fx_gp.type_as(pred[0])
        f2 = self.fy_gp.type_as(pred[0])
        u_bc = self.u_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        v_bc = self.v_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        p_bc = self.p_bc.unsqueeze(0).unsqueeze(0).type_as(pred[0])
        f = forcing_tensor # renaming variable
        u_pred = pred[0] #[:,0:1,:,:]
        v_pred = pred[1] #[:,1:2,:,:]
        p_pred = pred[2] #[:,2:3,:,:]
        # extract diffusivity and boundary conditions here
        x = inputs_tensor[:,0:1,:,:]
        y = inputs_tensor[:,1:2,:,:]
        bc1 = inputs_tensor[:,2:3,:,:]
        bc2 = inputs_tensor[:,3:4,:,:]
        bc3 = inputs_tensor[:,4:5,:,:]
        nu = inputs_tensor[:,5:6,:,:]
        # DERIVE NECESSARY VALUES
        trnsfrm_jac = (0.5*hx)*(0.5*hy)
        JxW = (gpw*trnsfrm_jac).unsqueeze(-1).unsqueeze(-1).unsqueeze(0)
        # apply boundary conditions
        u_pred = torch.where(bc1>=0.5, u_bc, u_pred)
        v_pred = torch.where(bc2>=0.5, v_bc, v_pred)
        p_pred = torch.where(bc3>=0.5, p_bc, p_pred)
        # variable values at GP
        visco = self.gauss_pt_evaluation(nu)
        u = self.gauss_pt_evaluation(u_pred)
        v = self.gauss_pt_evaluation(v_pred)
        p = self.gauss_pt_evaluation(p_pred)
        # 1st derivatives at GP
        p_x = self.gauss_pt_evaluation_der_x(p_pred)
        p_y = self.gauss_pt_evaluation_der_y(p_pred)
        u_x = self.gauss_pt_evaluation_der_x(u_pred)
        u_y = self.gauss_pt_evaluation_der_y(u_pred)
        v_x = self.gauss_pt_evaluation_der_x(v_pred)
        v_y = self.gauss_pt_evaluation_der_y(v_pred)
        # 2nd derivatives at GP
        u_xx = self.gauss_pt_evaluation_der2_x(u_pred)
        u_yy = self.gauss_pt_evaluation_der2_y(u_pred)
        v_xx = self.gauss_pt_evaluation_der2_x(v_pred)
        v_yy = self.gauss_pt_evaluation_der2_y(v_pred)
        # convection terms
        adv1 = u*u_x + v*u_y
        adv2 = u*v_x + v*v_y
        # laplacian terms
        lap1 = u_xx + u_yy
        lap2 = v_xx + v_yy
        # divergence
        divergence = u_x + v_y
        # coarse scale strong residuals
        res1 = adv1 - visco*lap1 + p_x - f1
        res2 = adv2 - visco*lap2 + p_y - f2
        res3 = divergence
        # tau from detached velocities so the stabilisation parameters do
        # not contribute gradients themselves
        taum, tauc = self.calc_tau((hx,hy), (u.clone().detach(),v.clone().detach()), visco_scalar)
        # CALCULATION STARTS
        # lhs
        W_U1x = N_values*u_x
        W_U2y = N_values*v_y
        Wx_U1x = dN_x_values*u_x
        Wy_U1y = dN_y_values*u_y
        Wx_U2x = dN_x_values*v_x
        Wy_U2y = dN_y_values*v_y
        Wx_P = dN_x_values*p
        Wy_P = dN_y_values*p
        Wx_Px = dN_x_values*p_x
        Wy_Py = dN_y_values*p_y
        W_Adv1 = N_values*adv1
        W_Adv2 = N_values*adv2
        W_Div = N_values*divergence
        Wx_Div = dN_x_values*divergence
        Wy_Div = dN_y_values*divergence
        U_dot_gradW = u * dN_x_values + v * dN_y_values
        Res_dot_gradW = res1 * dN_x_values + res2 * dN_y_values
        Res_dot_gradU1 = res1 * u_x + res2 * u_y
        Res_dot_gradU2 = res1 * v_x + res2 * v_y
        # crossterm 1
        C1_1 = U_dot_gradW*res1
        C1_2 = U_dot_gradW*res2
        # crossterm 2
        C2_1 = N_values*Res_dot_gradU1
        C2_2 = N_values*Res_dot_gradU2
        # Reynolds stress term
        Rey_1 = res1*Res_dot_gradW
        Rey_2 = res2*Res_dot_gradW
        # PSPG
        Wx_Res1 = dN_x_values*res1
        Wy_Res2 = dN_y_values*res2
        # rhs
        W_F1 = N_values*f1
        W_F2 = N_values*f2
        # integrated values on lhs & rhs
        # temp1 = W_Adv1 + visco*(Wx_U1x+Wy_U1y) - Wx_P - W_F1
        # temp2 = W_Adv2 + visco*(Wx_U2x+Wy_U2y) - Wy_P - W_F2
        # temp3 = W_U1x+W_U2y + self.pspg_param*(Wx_Px+Wy_Py)
        # # integrated values on lhs & rhs
        temp1 = W_Adv1 + visco*(Wx_U1x+Wy_U1y) - Wx_P - W_F1 + taum*C1_1 - taum*C2_1 - taum**2*Rey_1 + tauc*Wx_Div
        temp2 = W_Adv2 + visco*(Wx_U2x+Wy_U2y) - Wy_P - W_F2 + taum*C1_2 - taum*C2_2 - taum**2*Rey_2 + tauc*Wy_Div
        temp3 = W_Div + taum*(Wx_Res1 + Wy_Res2)
        # unassembled residual
        R_split_1 = torch.sum(temp1*JxW, 2) # sum across all GP
        R_split_2 = torch.sum(temp2*JxW, 2) # sum across all GP
        R_split_3 = torch.sum(temp3*JxW, 2) # sum across all GP
        # assembly
        R1 = torch.zeros_like(u_pred); R1 = self.Q1_vector_assembly(R1, R_split_1)
        R2 = torch.zeros_like(u_pred); R2 = self.Q1_vector_assembly(R2, R_split_2)
        R3 = torch.zeros_like(u_pred); R3 = self.Q1_vector_assembly(R3, R_split_3)
        # add boundary conditions to R <---- this step is very important
        R1 = torch.where(bc1>=0.5, u_bc, R1)
        R2 = torch.where(bc2>=0.5, v_bc, R2)
        R3 = torch.where(bc3>=0.5, p_bc, R3)
        return R1, R2, R3
    def loss(self, pred, inputs_tensor, forcing_tensor):
        """Return the Frobenius norms of the three residual fields; each
        norm is minimised by its own optimizer (see training_step)."""
        if self.eq_type == 'stokes':
            R1, R2, R3 = self.calc_residuals_Stokes(pred, inputs_tensor, forcing_tensor)
        elif self.eq_type == 'ns':
            R1, R2, R3 = self.calc_residuals_NS(pred, inputs_tensor, forcing_tensor)
        # loss = torch.norm(R1, 'fro') + torch.norm(R2, 'fro') + torch.norm(R3, 'fro')
        return torch.norm(R1, 'fro'), torch.norm(R2, 'fro'), torch.norm(R3, 'fro')
    def forward(self, batch):
        """Produce (u, v, p) either as raw parameter tensors
        ('no_network') or as network outputs driven by the viscosity
        channel ('network')."""
        inputs_tensor, forcing_tensor = batch
        if self.mapping_type == 'no_network':
            return self.net_u[0], self.net_v[0], self.net_p[0], inputs_tensor, forcing_tensor
        elif self.mapping_type == 'network':
            nu = inputs_tensor[:,5:6,:,:]
            return self.net_u(nu), self.net_v(nu), self.net_p(nu), inputs_tensor, forcing_tensor
    def training_step(self, batch, batch_idx, optimizer_idx):
        """One optimisation step; `optimizer_idx` picks which of the
        three losses (u, v, p) the current optimizer minimises."""
        u, v, p, inputs_tensor, forcing_tensor = self.forward(batch)
        loss_vals = self.loss((u, v, p), inputs_tensor, forcing_tensor)
        self.log('loss_u', loss_vals[0].item())
        self.log('loss_v', loss_vals[1].item())
        self.log('loss_p', loss_vals[2].item())
        return {"loss": loss_vals[optimizer_idx]}
    def training_step_end(self, training_step_outputs):
        # Pass-through hook; the local `loss` binding is unused.
        loss = training_step_outputs["loss"]
        return training_step_outputs
    def configure_optimizers(self):
        """One Adam optimizer per network, ordered [u, v, p] to match
        training_step's optimizer_idx indexing."""
        lr = self.learning_rate
        # opts = [torch.optim.LBFGS(self.network, lr=1.0, max_iter=5)]
        opts = [torch.optim.Adam(self.net_u.parameters(), lr=lr), torch.optim.Adam(self.net_v.parameters(), lr=lr), torch.optim.Adam(self.net_p.parameters(), lr=lr)]
        return opts, []
    def on_epoch_end(self):
        """Query the current solution and periodically plot it."""
        # self.network.eval()
        self.net_u.eval()
        self.net_v.eval()
        self.net_p.eval()
        inputs, forcing = self.dataset[0]
        u, v, p, u_x_gp, v_y_gp = self.do_query(inputs, forcing)
        u = u.squeeze().detach().cpu()
        v = v.squeeze().detach().cpu()
        p = p.squeeze().detach().cpu()
        u_x_gp = u_x_gp.squeeze().detach().cpu()
        v_y_gp = v_y_gp.squeeze().detach().cpu()
        if self.current_epoch % self.plot_frequency == 0:
            self.plot_contours(u, v, p, u_x_gp, v_y_gp)
    def do_query(self, inputs, forcing):
        """Run a forward pass on a single sample, impose the Dirichlet
        values, and also return the Gauss-point derivatives used for the
        divergence diagnostic."""
        u, v, p, inputs_tensor, forcing_tensor = self.forward((inputs.unsqueeze(0).type_as(next(self.net_u.parameters())), forcing.unsqueeze(0).type_as(next(self.net_u.parameters()))))
        f = forcing_tensor # renaming variable
        # extract diffusivity and boundary conditions here
        x = inputs_tensor[:,0:1,:,:]
        y = inputs_tensor[:,1:2,:,:]
        bc1 = inputs_tensor[:,2:3,:,:]
        bc2 = inputs_tensor[:,3:4,:,:]
        bc3 = inputs_tensor[:,4:5,:,:]
        # apply boundary conditions
        u_bc = self.u_bc.unsqueeze(0).unsqueeze(0).type_as(u)
        v_bc = self.v_bc.unsqueeze(0).unsqueeze(0).type_as(u)
        p_bc = self.p_bc.unsqueeze(0).unsqueeze(0).type_as(u)
        u = torch.where(bc1>=0.5, u_bc, u)
        v = torch.where(bc2>=0.5, v_bc, v)
        p = torch.where(bc3>=0.5, p_bc, p)
        u_x_gp = self.gauss_pt_evaluation_der_x(u)
        v_y_gp = self.gauss_pt_evaluation_der_y(v)
        return u, v, p, u_x_gp, v_y_gp
    def plot_contours(self, u, v, p, u_x_gp, v_y_gp):
        """Render a 3x3 diagnostic figure (fields, divergence, speed,
        streamlines, midline cuts vs. reference) and log it to the first
        (TensorBoard) logger as well as a PNG on disk."""
        fig, axs = plt.subplots(3, 3, figsize=(4*3,2.4*3),
                            subplot_kw={'aspect': 'auto'}, squeeze=True)
        for i in range(axs.shape[0]-1):
            for j in range(axs.shape[1]):
                axs[i,j].set_xticks([])
                axs[i,j].set_yticks([])
        div_gp = u_x_gp + v_y_gp
        div_elmwise = torch.sum(div_gp, 0)
        div_total = torch.sum(div_elmwise)
        interp_method = 'bilinear'
        im0 = axs[0,0].imshow(u,cmap='jet', origin='lower', interpolation=interp_method)
        fig.colorbar(im0, ax=axs[0,0]); axs[0,0].set_title(r'$u_x$')
        im1 = axs[0,1].imshow(v,cmap='jet',origin='lower', interpolation=interp_method)
        fig.colorbar(im1, ax=axs[0,1]); axs[0,1].set_title(r'$u_y$')
        im2 = axs[0,2].imshow(p,cmap='jet',origin='lower', interpolation=interp_method)
        fig.colorbar(im2, ax=axs[0,2]); axs[0,2].set_title(r'$p$')
        im3 = axs[1,0].imshow(div_elmwise,cmap='jet',origin='lower', interpolation=interp_method)
        fig.colorbar(im3, ax=axs[1,0]); axs[1,0].set_title(r'$\int(\nabla\cdot u) d\Omega = $' + '{:.3e}'.format(div_total.item()))
        im4 = axs[1,1].imshow((u**2 + v**2)**0.5,cmap='jet',origin='lower', interpolation=interp_method)
        fig.colorbar(im4, ax=axs[1,1]); axs[1,1].set_title(r'$\sqrt{u_x^2+u_y^2}$')
        x = np.linspace(0, 1, self.domain_sizeX)
        y = np.linspace(0, 1, self.domain_sizeY)
        xx , yy = np.meshgrid(x, y)
        im5 = axs[1,2].streamplot(xx, yy, u, v, color='k', cmap='jet'); axs[1,2].set_title("Streamlines")
        mid_idxX = int(self.domain_sizeX/2)
        mid_idxY = int(self.domain_sizeY/2)
        im = axs[2,0].plot(self.dataset.y[:,mid_idxX], u[:,mid_idxX],label='DiffNet')
        im = axs[2,0].plot(self.midline_Y,self.midline_U,label='Numerical')
        axs[2,0].set_xlabel('y'); axs[2,0].legend(); axs[2,0].set_title(r'$u_x @ x=0.5$')
        im = axs[2,1].plot(self.dataset.x[mid_idxY,:], v[mid_idxY,:],label='DiffNet')
        im = axs[2,1].plot(self.midline_X,self.midline_V,label='Numerical')
        axs[2,1].set_xlabel('x'); axs[2,1].legend(); axs[2,1].set_title(r'$u_y @ y=0.5$')
        im = axs[2,2].plot(self.dataset.x[-1,:], p[-1,:],label='DiffNet')
        im = axs[2,2].plot(self.midline_X,self.topline_P,label='Numerical')
        axs[2,2].set_xlabel('x'); axs[2,2].legend(); axs[2,2].set_title(r'$p @ y=1.0$')
        fig.suptitle("Re = {:.1f}, Nx = {}, Ny = {}, LR = {:.1e}".format(self.Re, self.domain_sizeX, self.domain_sizeY, self.learning_rate), fontsize=12)
        plt.savefig(os.path.join(self.logger[0].log_dir, 'contour_' + str(self.current_epoch) + '.png'))
        self.logger[0].experiment.add_figure('Contour Plots', fig, self.current_epoch)
        plt.close('all')
# def main():
# lx = 1.
# ly = 1.
# Nx = 64
# Ny = 64
# domain_size = 64
# Re = 200.
# dir_string = "ns_ldc"
# max_epochs = 1001
# plot_frequency = 20
# LR = 5e-3
# opt_switch_epochs = max_epochs
# load_from_prev = False
# load_version_id = 37
# x = np.linspace(0, 1, Nx)
# y = np.linspace(0, 1, Ny)
# xx , yy = np.meshgrid(x, y)
# dataset = NS_LDC_Dataset(domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), Re=Re)
# if load_from_prev:
# print("LOADING FROM PREVIOUS VERSION: ", load_version_id)
# case_dir = './ns_ldc/version_'+str(load_version_id)
# net_u = torch.load(os.path.join(case_dir, 'net_u.pt'))
# net_v = torch.load(os.path.join(case_dir, 'net_v.pt'))
# net_p = torch.load(os.path.join(case_dir, 'net_p.pt'))
# else:
# print("INITIALIZING PARAMETERS TO ZERO")
# v1 = np.zeros_like(dataset.x)
# v2 = np.zeros_like(dataset.x)
# p = np.zeros_like(dataset.x)
# u_tensor = np.expand_dims(np.array([v1,v2,p]),0)
# # network = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor), requires_grad=True)])
# net_u = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,0:1,:,:]), requires_grad=True)])
# net_v = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,1:2,:,:]), requires_grad=True)])
# net_p = torch.nn.ParameterList([torch.nn.Parameter(torch.FloatTensor(u_tensor[:,2:3,:,:]), requires_grad=True)])
# network = (net_u, net_v, net_p)
# basecase = NS_LDC(network, dataset, domain_lengths=(lx,ly), domain_sizes=(Nx,Ny), batch_size=1, fem_basis_deg=1, learning_rate=LR, plot_frequency=plot_frequency)
# # Initialize trainer
# logger = pl.loggers.TensorBoardLogger('.', name=dir_string)
# csv_logger = pl.loggers.CSVLogger(logger.save_dir, name=logger.name, version=logger.version)
# early_stopping = pl.callbacks.early_stopping.EarlyStopping('loss',
# min_delta=1e-8, patience=10, verbose=False, mode='max', strict=True)
# checkpoint = pl.callbacks.model_checkpoint.ModelCheckpoint(monitor='loss',
# dirpath=logger.log_dir, filename='{epoch}-{step}',
# mode='min', save_last=True)
# lbfgs_switch = OptimSwitchLBFGS(epochs=opt_switch_epochs)
# trainer = Trainer(gpus=[0],callbacks=[early_stopping,lbfgs_switch],
# checkpoint_callback=checkpoint, logger=[logger,csv_logger],
# max_epochs=max_epochs, deterministic=True, profiler="simple")
# # Training
# trainer.fit(basecase)
# # Save network
# torch.save(basecase.net_u, os.path.join(logger.log_dir, 'net_u.pt'))
# torch.save(basecase.net_v, os.path.join(logger.log_dir, 'net_v.pt'))
# torch.save(basecase.net_p, os.path.join(logger.log_dir, 'net_p.pt'))
# if __name__ == '__main__':
# main() | 40.461412 | 184 | 0.60444 |
f7bd804d2a4e919321cad80c6b2174a0dbedda7f | 504 | py | Python | rapdevpy/database/sql_alchemy_tables.py | MislavJaksic/rapdevpy | bf03c151ed00b88f89d3f6fbfaeed2348fe1e9f7 | [
"MIT"
] | null | null | null | rapdevpy/database/sql_alchemy_tables.py | MislavJaksic/rapdevpy | bf03c151ed00b88f89d3f6fbfaeed2348fe1e9f7 | [
"MIT"
] | null | null | null | rapdevpy/database/sql_alchemy_tables.py | MislavJaksic/rapdevpy | bf03c151ed00b88f89d3f6fbfaeed2348fe1e9f7 | [
"MIT"
] | null | null | null | from typing import Dict
from sqlalchemy import MetaData, Table
class SqlAlchemyTables:
    """Container pairing a SQLAlchemy MetaData with the Tables built on it."""
    # Shared MetaData instance that every created Table is bound to.
    metadata: MetaData
    # Mapping of table name -> SQLAlchemy Table object.
    tables: Dict[str, Table]
    def __init__(self, metadata: MetaData):
        """Store *metadata* and start with an empty table registry."""
        self.metadata = metadata
        self.tables = {}
    def __getitem__(self, key: str) -> Table:
        """Return the table registered under *key* (KeyError if absent)."""
        return self.tables[key]
    def __len__(self) -> int:
        """Number of tables registered so far."""
        return len(self.tables)
    def add_table(self, table: str, *args) -> None:
        """Create a Table named *table* on the shared metadata; *args*
        are the Column/constraint definitions forwarded to Table()."""
        self.tables[table] = Table(table, self.metadata, *args)
| 22.909091 | 63 | 0.646825 |
6f6a9d02a9b5e98208206d279fa0e2f5e072f4bb | 22,688 | py | Python | samsungctl/application.py | xxKeoxx/samsungctl | 3fe02e20e7d2fb322a41b537f0f7cc7935d8c44d | [
"MIT"
] | null | null | null | samsungctl/application.py | xxKeoxx/samsungctl | 3fe02e20e7d2fb322a41b537f0f7cc7935d8c44d | [
"MIT"
] | null | null | null | samsungctl/application.py | xxKeoxx/samsungctl | 3fe02e20e7d2fb322a41b537f0f7cc7935d8c44d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import threading
import base64
import requests
# {
# "isLock": false,
# "name": "Deezer",
# "appType": "web_app",
# "position": 13,
# "appId": 3201608010191,
# "launcherType": "launcher",
# "action_type": null,
# "mbrIndex": null,
# "accelerators": [
# {
# "appDatas": [
# {
# "isPlayable": 1,
# "icon": "/opt/usr/home/owner/share/DownloadManager/cexr1qp97S/140696",
# "subtitle": null,
# "appType": "pxdb_app",
# "title": "Rap Bangers",
# "appId": "cexr1qp97S.Deezer",
# "display_from": null,
# "action_play_url": {
# "picture": "http://api.deezer.com/playlist/1996494362/image",
# "md5_image": "882ef931a7a428e7a2b503f530df4ed2",
# "picture_medium": "http://cdn-images.deezer.com/images/playlist/882ef931a7a428e7a2b503f530df4ed2/250x250-000000-80-0-0.jpg",
# "nb_tracks": 60,
# "title": "Rap Bangers",
# "checksum": "cad64684448e832c67aa7d103fafb63c",
# "tracklist": "http://api.deezer.com/playlist/1996494362/tracks",
# "creation_date": "2016-07-05 14:34:42",
# "public": true,
# "link": "http://www.deezer.com/playlist/1996494362",
# "user": {
# "tracklist": "http://api.deezer.com/user/917475151/flow",
# "type": "user",
# "id": 917475151,
# "name": "Mehdi Rap Editor"
# },
# "picture_small": "http://cdn-images.deezer.com/images/playlist/882ef931a7a428e7a2b503f530df4ed2/56x56-000000-80-0-0.jpg",
# "picture_xl": "http://cdn-images.deezer.com/images/playlist/882ef931a7a428e7a2b503f530df4ed2/1000x1000-000000-80-0-0.jpg",
# "type": "playlist",
# "id": 1996494362,
# "picture_big": "http://cdn-images.deezer.com/images/playlist/882ef931a7a428e7a2b503f530df4ed2/500x500-000000-80-0-0.jpg"
# },
# "liveLauncherType": "",
# "serviceId": "",
# "launcherType": "launcher",
# "action_type": "APP_LAUNCH",
# "mbrIndex": -2,
# "sourceTypeNum": 0,
# "display_until": null,
# "mbrSource": 0,
# "id": 140696,
# "subtitle3": "",
# "subtitle2": ""
# },
# {
# "isPlayable": 1,
# "icon": "/opt/usr/home/owner/share/DownloadManager/cexr1qp97S/140692",
# "subtitle": null,
# "appType": "pxdb_app",
# "title": "Best Of Rap Bangers 2018",
# "appId": "cexr1qp97S.Deezer",
# "display_from": null,
# "action_play_url": {
# "picture": "http://api.deezer.com/playlist/5171651864/image",
# "md5_image": "de9dee5f90f4ebf0c9c5ba2e69f42691",
# "picture_medium": "http://cdn-images.deezer.com/images/playlist/de9dee5f90f4ebf0c9c5ba2e69f42691/250x250-000000-80-0-0.jpg",
# "nb_tracks": 52,
# "title": "Best Of Rap Bangers 2018",
# "checksum": "e7120c1c76d9dfa4fa7a4a3de4693dc3",
# "tracklist": "http://api.deezer.com/playlist/5171651864/tracks",
# "creation_date": "2018-12-02 16:53:54",
# "public": true,
# "link": "http://www.deezer.com/playlist/5171651864",
# "user": {
# "tracklist": "http://api.deezer.com/user/917475151/flow",
# "type": "user",
# "id": 917475151,
# "name": "Mehdi Rap Editor"
# },
# "picture_small": "http://cdn-images.deezer.com/images/playlist/de9dee5f90f4ebf0c9c5ba2e69f42691/56x56-000000-80-0-0.jpg",
# "picture_xl": "http://cdn-images.deezer.com/images/playlist/de9dee5f90f4ebf0c9c5ba2e69f42691/1000x1000-000000-80-0-0.jpg",
# "type": "playlist",
# "id": 5171651864,
# "picture_big": "http://cdn-images.deezer.com/images/playlist/de9dee5f90f4ebf0c9c5ba2e69f42691/500x500-000000-80-0-0.jpg"
# },
# "liveLauncherType": "",
# "serviceId": "",
# "launcherType": "launcher",
# "action_type": "APP_LAUNCH",
# "mbrIndex": -2,
# "sourceTypeNum": 0,
# "display_until": null,
# "mbrSource": 0,
# "id": 140692,
# "subtitle3": "",
# "subtitle2": ""
# },
# {
# "isPlayable": 1,
# "icon": "/opt/usr/home/owner/share/DownloadManager/cexr1qp97S/140693",
# "subtitle": null,
# "appType": "pxdb_app",
# "title": "Christmas Pop",
# "appId": "cexr1qp97S.Deezer",
# "display_from": null,
# "action_play_url": {
# "picture": "http://api.deezer.com/playlist/3833591862/image",
# "md5_image": "9c27b60ecdd08c224218610126a86453",
# "picture_medium": "http://cdn-images.deezer.com/images/playlist/9c27b60ecdd08c224218610126a86453/250x250-000000-80-0-0.jpg",
# "nb_tracks": 60,
# "title": "Christmas Pop",
# "checksum": "03ba236386f9474a0227414e4f48d73c",
# "tracklist": "http://api.deezer.com/playlist/3833591862/tracks",
# "creation_date": "2017-11-23 15:40:17",
# "public": true,
# "link": "http://www.deezer.com/playlist/3833591862",
# "user": {
# "tracklist": "http://api.deezer.com/user/753546365/flow",
# "type": "user",
# "id": 753546365,
# "name": "Dom - Pop Music Editor"
# },
# "picture_small": "http://cdn-images.deezer.com/images/playlist/9c27b60ecdd08c224218610126a86453/56x56-000000-80-0-0.jpg",
# "picture_xl": "http://cdn-images.deezer.com/images/playlist/9c27b60ecdd08c224218610126a86453/1000x1000-000000-80-0-0.jpg",
# "type": "playlist",
# "id": 3833591862,
# "picture_big": "http://cdn-images.deezer.com/images/playlist/9c27b60ecdd08c224218610126a86453/500x500-000000-80-0-0.jpg"
# },
# "liveLauncherType": "",
# "serviceId": "",
# "launcherType": "launcher",
# "action_type": "APP_LAUNCH",
# "mbrIndex": -2,
# "sourceTypeNum": 0,
# "display_until": null,
# "mbrSource": 0,
# "id": 140693,
# "subtitle3": "",
# "subtitle2": ""
# },
# {
# "isPlayable": 1,
# "icon": "/opt/usr/home/owner/share/DownloadManager/cexr1qp97S/140694",
# "subtitle": null,
# "appType": "pxdb_app",
# "title": "2018's Biggest Hits",
# "appId": "cexr1qp97S.Deezer",
# "display_from": null,
# "action_play_url": {
# "picture": "http://api.deezer.com/playlist/1283499335/image",
# "md5_image": "33688a8b06bf539cb5d1d07be5816fa0",
# "picture_medium": "http://cdn-images.deezer.com/images/playlist/33688a8b06bf539cb5d1d07be5816fa0/250x250-000000-80-0-0.jpg",
# "nb_tracks": 60,
# "title": "2018's Biggest Hits",
# "checksum": "8dfeeddf33931c5a66cc89931bf57f55",
# "tracklist": "http://api.deezer.com/playlist/1283499335/tracks",
# "creation_date": "2015-06-26 15:27:47",
# "public": true,
# "link": "http://www.deezer.com/playlist/1283499335",
# "user": {
# "tracklist": "http://api.deezer.com/user/753546365/flow",
# "type": "user",
# "id": 753546365,
# "name": "Dom - Pop Music Editor"
# },
# "picture_small": "http://cdn-images.deezer.com/images/playlist/33688a8b06bf539cb5d1d07be5816fa0/56x56-000000-80-0-0.jpg",
# "picture_xl": "http://cdn-images.deezer.com/images/playlist/33688a8b06bf539cb5d1d07be5816fa0/1000x1000-000000-80-0-0.jpg",
# "type": "playlist",
# "id": 1283499335,
# "picture_big": "http://cdn-images.deezer.com/images/playlist/33688a8b06bf539cb5d1d07be5816fa0/500x500-000000-80-0-0.jpg"
# },
# "liveLauncherType": "",
# "serviceId": "",
# "launcherType": "launcher",
# "action_type": "APP_LAUNCH",
# "mbrIndex": -2,
# "sourceTypeNum": 0,
# "display_until": null,
# "mbrSource": 0,
# "id": 140694,
# "subtitle3": "",
# "subtitle2": ""
# },
# {
# "isPlayable": 1,
# "icon": "/opt/usr/home/owner/share/DownloadManager/cexr1qp97S/140695",
# "subtitle": null,
# "appType": "pxdb_app",
# "title": "Hits of the Moment",
# "appId": "cexr1qp97S.Deezer",
# "display_from": null,
# "action_play_url": {
# "picture": "http://api.deezer.com/playlist/2098157264/image",
# "md5_image": "b3924470ee53c1180913e06d3cfd006b",
# "picture_medium": "http://cdn-images.deezer.com/images/playlist/b3924470ee53c1180913e06d3cfd006b/250x250-000000-80-0-0.jpg",
# "nb_tracks": 60,
# "title": "Hits of the Moment",
# "checksum": "8dfe26c3e3a7f6ec8257b46901fa3a28",
# "tracklist": "http://api.deezer.com/playlist/2098157264/tracks",
# "creation_date": "2016-08-04 15:42:22",
# "public": true,
# "link": "http://www.deezer.com/playlist/2098157264",
# "user": {
# "tracklist": "http://api.deezer.com/user/753546365/flow",
# "type": "user",
# "id": 753546365,
# "name": "Dom - Pop Music Editor"
# },
# "picture_small": "http://cdn-images.deezer.com/images/playlist/b3924470ee53c1180913e06d3cfd006b/56x56-000000-80-0-0.jpg",
# "picture_xl": "http://cdn-images.deezer.com/images/playlist/b3924470ee53c1180913e06d3cfd006b/1000x1000-000000-80-0-0.jpg",
# "type": "playlist",
# "id": 2098157264,
# "picture_big": "http://cdn-images.deezer.com/images/playlist/b3924470ee53c1180913e06d3cfd006b/500x500-000000-80-0-0.jpg"
# },
# "liveLauncherType": "",
# "serviceId": "",
# "launcherType": "launcher",
# "action_type": "APP_LAUNCH",
# "mbrIndex": -2,
# "sourceTypeNum": 0,
# "display_until": null,
# "mbrSource": 0,
# "id": 140695,
# "subtitle3": "",
# "subtitle2": ""
# }
# ],
# "title": "featured"
# }
# ],
# {
# "is_lock": 0,
# "icon": "/opt/share/webappservice/apps_icon/FirstScreen/111299001912/250x250.png",
# "app_type": 2,
# "name": "YouTube",
# "appId": "111299001912"
# },
class Application(object):
    """
    One application entry as reported by the TV.

    The TV reports applications in two JSON shapes (see the sample payloads
    in the comments above this class): one uses camelCase keys
    (``isLock``/``appType``) and the other snake_case keys
    (``is_lock``/``app_type``). The constructor accepts both spellings and
    keeps whichever value was provided.
    """
    def __init__(
        self,
        remote,
        name,
        isLock=None,
        is_lock=None,
        appType=None,
        app_type=None,
        position=None,
        appId=None,
        launcherType=None,
        action_type=None,
        mbrIndex=None,
        accelerators=None,
        sourceTypeNum=None,
        icon=None,
        id=None,
        mbrSource=None
    ):
        """
        :param remote: the remote-control connection used to emit events.
        :param name: display name of the application.

        The remaining keyword arguments mirror the keys of the JSON app
        description. ``action_type`` is accepted for payload compatibility
        but ignored: the value exposed by the ``action_type`` property is
        derived from the app type instead.
        """
        self._remote = remote
        # The payload carries either ``isLock`` or ``is_lock``; keep
        # whichever spelling was supplied (previously ``is_lock`` was
        # silently dropped).
        self._is_lock = isLock if isLock is not None else is_lock
        self.name = name
        # Same dual-spelling handling for the application type (previously
        # ``appType`` was silently dropped).
        self.app_type = appType if appType is not None else app_type
        self.position = position
        self.app_id = appId
        self.launcher_type = launcherType
        self.mbr_index = mbrIndex
        if accelerators is not None:
            self._accelerators = accelerators
        else:
            self._accelerators = []
        self.source_type_num = sourceTypeNum
        self._icon = icon
        self.id = id
        self.mbr_source = mbrSource
    @property
    def action_type(self):
        """Launch action used by :meth:`run`, derived from the app type."""
        if self.app_type == 2:
            return 'DEEP_LINK'
        else:
            return 'NATIVE_LAUNCH'
    def _fetch_app_field(self, field, default):
        """
        Query the TV's REST API for this application and return a single
        field of the JSON response; returns ``default`` when the response is
        not valid JSON or does not contain the field.
        """
        url = 'http://{0}:8001/api/v2/applications/{1}'.format(
            self._remote.config['host'],
            self.app_id
        )
        response = requests.get(url)
        try:
            payload = response.json()
        except Exception:
            return default
        return payload.get(field, default)
    @property
    def version(self):
        """Version string reported by the TV, or ``'Unknown'``."""
        return self._fetch_app_field('version', 'Unknown')
    @property
    def is_visible(self):
        """Visibility flag reported by the TV, or ``None`` on failure."""
        return self._fetch_app_field('visible', None)
    @property
    def is_running(self):
        """Running flag reported by the TV, or ``None`` on failure."""
        return self._fetch_app_field('running', None)
    def run(self, meta_tag=None):
        """Ask the TV to launch this application (optionally with a meta tag)."""
        params = dict(
            event='ed.apps.launch',
            to='host',
            data=dict(
                appId=self.app_id,
                action_type=self.action_type
            )
        )
        if meta_tag is not None:
            params['data']['metaTag'] = meta_tag
        self._remote.send('ms.channel.emit', **params)
    @property
    def is_lock(self):
        """Whether the application is locked (always a ``bool``)."""
        return bool(self._is_lock)
    def __iter__(self):
        """Yield an :class:`Accelerator` for each accelerator group."""
        for accelerator in self._accelerators:
            yield Accelerator(self._remote, **accelerator)
    @property
    def icon(self):
        """
        Icon image bytes fetched from the TV, or ``None`` when no icon path
        is known or the TV does not answer within 3 seconds.
        """
        if self._icon:
            params = dict(
                event="ed.apps.icon",
                to="host",
                data=dict(iconPath=self._icon)
            )
            icon = [None]
            event = threading.Event()
            def callback(data):
                data = data['imageBase64']
                if data is not None:
                    # base64.decodestring was removed in Python 3.9;
                    # decodebytes is the behaviorally identical replacement.
                    data = base64.decodebytes(data)
                icon[0] = data
                event.set()
            self._remote.register_receive_callback(
                callback,
                'iconPath',
                self._icon
            )
            self._remote.send("ms.channel.emit", **params)
            event.wait(3.0)
            return icon[0]
class Accelerator(object):
    """A titled group of playable items belonging to an application."""
    def __init__(self, remote, title, appDatas):
        """Store the remote connection, group title and raw item dicts."""
        self._remote = remote
        self.title = title
        self._app_datas = appDatas
    def __iter__(self):
        """Yield each raw item dict wrapped in an :class:`AppData`."""
        for entry in self._app_datas:
            yield AppData(self._remote, **entry)
class AppData(object):
    """
    One playable item inside an application's accelerator group (see the
    sample payloads in the comments at the top of this module).
    """
    def __init__(
        self,
        remote,
        isPlayable=None,
        subtitle=None,
        appType=None,
        title=None,
        mbrIndex=None,
        liveLauncherType=None,
        action_play_url=None,
        serviceId=None,
        launcherType=None,
        sourceTypeNum=None,
        action_type=None,
        appId=None,
        subtitle2=None,
        display_from=None,
        display_until=None,
        mbrSource=0,
        id=None,
        subtitle3=None,
        icon=None
    ):
        """
        :param remote: the remote-control connection used to emit events.

        The remaining keyword arguments mirror the keys of the JSON item
        description returned by the TV.
        """
        self._remote = remote
        self._is_playable = isPlayable
        self.subtitle = subtitle
        self.app_type = appType
        self.title = title
        self.mbr_index = mbrIndex
        self.live_launcher_type = liveLauncherType
        self.action_play_url = action_play_url
        self.service_id = serviceId
        self.launcher_type = launcherType
        self.source_type_num = sourceTypeNum
        self.action_type = action_type
        self.app_id = appId
        self.subtitle2 = subtitle2
        self.display_from = display_from
        self.display_until = display_until
        self.mbr_source = mbrSource
        self.id = id
        self.subtitle3 = subtitle3
        self._icon = icon
    @property
    def is_playable(self):
        """Whether this item can be played (always a ``bool``)."""
        return bool(self._is_playable)
    def run(self):
        """Launch this item on the TV; no-op unless playable with an action type."""
        if self.is_playable and self.action_type:
            params = dict(
                event='ed.apps.launch',
                to='host',
                data=dict(
                    appId=self.app_id,
                    action_type=self.action_type
                )
            )
            if self.action_play_url:
                params['data']['metaTag'] = self.action_play_url
            self._remote.send('ms.channel.emit', **params)
    @property
    def icon(self):
        """
        Icon image bytes fetched from the TV, or ``None`` when no icon path
        is known or the TV does not answer within 3 seconds.
        """
        if self._icon:
            params = dict(
                event="ed.apps.icon",
                to="host",
                data=dict(iconPath=self._icon)
            )
            icon = [None]
            event = threading.Event()
            def callback(data):
                data = data['imageBase64']
                if data is not None:
                    # base64.decodestring was removed in Python 3.9;
                    # decodebytes is the behaviorally identical replacement.
                    data = base64.decodebytes(data)
                icon[0] = data
                event.set()
            self._remote.register_receive_callback(
                callback,
                'iconPath',
                self._icon
            )
            self._remote.send("ms.channel.emit", **params)
            event.wait(3.0)
            return icon[0]
| 43.883946 | 162 | 0.403914 |
8d7cad9cdc8441245df53fdda021a080aa9cafb2 | 218 | py | Python | cyder/cydns/tests/all.py | ngokevin/cyder | 8bc0e4aea9ec4b7ac9260b083839bbb1174441d3 | [
"BSD-3-Clause"
] | 1 | 2016-07-06T13:00:53.000Z | 2016-07-06T13:00:53.000Z | cyder/cydns/tests/all.py | ngokevin/cyder | 8bc0e4aea9ec4b7ac9260b083839bbb1174441d3 | [
"BSD-3-Clause"
] | null | null | null | cyder/cydns/tests/all.py | ngokevin/cyder | 8bc0e4aea9ec4b7ac9260b083839bbb1174441d3 | [
"BSD-3-Clause"
] | null | null | null | from cyder.cydns.tests.model_tests import *
from cyder.cydns.domain.view_tests import *
from cyder.cydns.reverse_domain.view_tests import *
from cyder.cydns.soa.view_tests import *
from cyder.cydns.view_tests import *
| 36.333333 | 51 | 0.821101 |
8f2e11ee08f5deb7d04487792cb63a10d9c21c09 | 1,175 | py | Python | MyTuts/String.py | OmkarPathak/My-Personal-Website | 45df735662f45f6d5ad8bbbcbbb4ae31134fb86b | [
"MIT"
] | 9 | 2018-01-26T19:52:55.000Z | 2021-03-19T02:50:43.000Z | MyTuts/String.py | OmkarPathak/My-Personal-Website | 45df735662f45f6d5ad8bbbcbbb4ae31134fb86b | [
"MIT"
] | 1 | 2020-10-02T03:34:08.000Z | 2020-10-02T03:34:08.000Z | MyTuts/String.py | OmkarPathak/My-Personal-Website | 45df735662f45f6d5ad8bbbcbbb4ae31134fb86b | [
"MIT"
] | 14 | 2018-01-18T13:02:37.000Z | 2021-06-16T16:12:43.000Z | # Author: OMKAR PATHAK
# NOTE: This tutorial is based for Python 3
# In this example we would see some operations that can be performed
# on Python data type: Strings
myString = 'hello' # Assigning a string to a variable
# h => 0
# e => 1
# l => 2
# l => 3
# o => 4
print(myString[0], myString[1])
print(myString[2] + 'abc')
# String Operations
result = myString.capitalize() # capitalize the first letter of the string
print(myString) # 'Hello'
result = myString.count('l') # count the occurances of the letter l
print(myString) # 2
result = myString.islower() # check if the string contains ALL lowercase letters
print(result) # True
result = myString.replace('l', 'a') # replace all occurances of 'l' with 'm'
print(result)
result = myString.index('h') # check the index for letter 'h' in the string
print(result) # 0
result = myString.upper() # change the WHOLE string to uppercase
print(result)
result = myString.lower() # change the WHOLE string to lowercase
print(result)
# There are many such operations
# For full list of operations fire below command at the interactive shell of Python
# COMMAND: dir(str) | 26.704545 | 83 | 0.690213 |
d59726787c8859dacebe118cdc4606bde8efda40 | 9,326 | py | Python | tests/integration/modules/test_ssh.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/integration/modules/test_ssh.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/integration/modules/test_ssh.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
Test the ssh module
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import shutil
# Import Salt Testing libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.case import ModuleCase
from tests.support.helpers import skip_if_binaries_missing
# Import salt libs
import salt.utils.files
import salt.utils.platform
# Import 3rd-party libs
from salt.ext.tornado.httpclient import HTTPClient
# Fingerprint of github.com's public SSH host key, used below to validate the
# results of the ssh module against a stable, well-known remote host.
# NOTE(review): GitHub rotates/adds host keys over time — verify this value is
# still current if these tests start failing.
GITHUB_FINGERPRINT = '9d:38:5b:83:a9:17:52:92:56:1a:5e:c4:d4:81:8e:0a:ca:51:a2:64:f1:74:20:11:2e:f8:8a:c3:a1:39:49:8f'
def check_status():
    '''
    Check the status of Github for remote operations
    '''
    try:
        status_code = HTTPClient().fetch('http://github.com').code
    except Exception:  # pylint: disable=broad-except
        return False
    return status_code == 200
@skip_if_binaries_missing(['ssh', 'ssh-keygen'], check_all=True)
class SSHModuleTest(ModuleCase):
    '''
    Test the ssh module
    '''
    @classmethod
    def setUpClass(cls):
        # All fixture files live under a throw-away "subsalt" directory so
        # tearDown can remove everything in one shot.
        cls.subsalt_dir = os.path.join(RUNTIME_VARS.TMP, 'subsalt')
        cls.authorized_keys = os.path.join(cls.subsalt_dir, 'authorized_keys')
        cls.known_hosts = os.path.join(cls.subsalt_dir, 'known_hosts')
    def setUp(self):
        '''
        Set up the ssh module tests
        '''
        # These tests talk to github.com; skip instead of failing spuriously
        # when it is unreachable.
        if not check_status():
            self.skipTest('External source, github.com is down')
        super(SSHModuleTest, self).setUp()
        if not os.path.isdir(self.subsalt_dir):
            os.makedirs(self.subsalt_dir)
        # Expected public key material for github.com, used by the
        # known_hosts assertions below.
        ssh_raw_path = os.path.join(RUNTIME_VARS.FILES, 'ssh', 'raw')
        with salt.utils.files.fopen(ssh_raw_path) as fd:
            self.key = fd.read().strip()
    def tearDown(self):
        '''
        Tear down the ssh module tests
        '''
        if os.path.isdir(self.subsalt_dir):
            shutil.rmtree(self.subsalt_dir)
        super(SSHModuleTest, self).tearDown()
        del self.key
    def test_auth_keys(self):
        '''
        test ssh.auth_keys
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'authorized_keys'),
            self.authorized_keys)
        user = 'root'
        if salt.utils.platform.is_windows():
            user = 'Administrator'
        ret = self.run_function('ssh.auth_keys', [user, self.authorized_keys])
        self.assertEqual(len(list(ret.items())), 1)  # exactly one key is found
        key_data = list(ret.items())[0][1]
        try:
            self.assertEqual(key_data['comment'], 'github.com')
            self.assertEqual(key_data['enc'], 'ssh-rsa')
            self.assertEqual(
                key_data['options'], ['command="/usr/local/lib/ssh-helper"']
            )
            self.assertEqual(key_data['fingerprint'], GITHUB_FINGERPRINT)
        except AssertionError as exc:
            # Re-raise with the full function output to ease debugging.
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
    def test_bad_enctype(self):
        '''
        test to make sure that bad key encoding types don't generate an
        invalid key entry in authorized_keys
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'authorized_badkeys'),
            self.authorized_keys)
        ret = self.run_function('ssh.auth_keys', ['root', self.authorized_keys])
        # The authorized_badkeys file contains a key with an invalid ssh key
        # encoding (dsa-sha2-nistp256 instead of ecdsa-sha2-nistp256)
        # auth_keys should skip any keys with invalid encodings. Internally
        # the minion will throw a CommandExecutionError so the
        # user will get an indicator of what went wrong.
        self.assertEqual(len(list(ret.items())), 0)  # Zero keys found
    def test_get_known_host_entries(self):
        '''
        Check that known host information is returned from ~/.ssh/config
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'known_hosts'),
            self.known_hosts)
        arg = ['root', 'github.com']
        kwargs = {'config': self.known_hosts}
        ret = self.run_function('ssh.get_known_host_entries', arg, **kwargs)[0]
        try:
            self.assertEqual(ret['enc'], 'ssh-rsa')
            self.assertEqual(ret['key'], self.key)
            self.assertEqual(ret['fingerprint'], GITHUB_FINGERPRINT)
        except AssertionError as exc:
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
    def test_recv_known_host_entries(self):
        '''
        Check that known host information is returned from remote host
        '''
        # Unlike the test above, this queries github.com directly.
        ret = self.run_function('ssh.recv_known_host_entries', ['github.com'])
        try:
            self.assertNotEqual(ret, None)
            self.assertEqual(ret[0]['enc'], 'ssh-rsa')
            self.assertEqual(ret[0]['key'], self.key)
            self.assertEqual(ret[0]['fingerprint'], GITHUB_FINGERPRINT)
        except AssertionError as exc:
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
    def test_check_known_host_add(self):
        '''
        Check known hosts by its fingerprint. File needs to be updated
        '''
        # known_hosts file does not exist yet, so the host must be added.
        arg = ['root', 'github.com']
        kwargs = {'fingerprint': GITHUB_FINGERPRINT, 'config': self.known_hosts}
        ret = self.run_function('ssh.check_known_host', arg, **kwargs)
        self.assertEqual(ret, 'add')
    def test_check_known_host_update(self):
        '''
        ssh.check_known_host update verification
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'known_hosts'),
            self.known_hosts)
        arg = ['root', 'github.com']
        kwargs = {'config': self.known_hosts}
        # wrong fingerprint
        ret = self.run_function('ssh.check_known_host', arg,
                                **dict(kwargs, fingerprint='aa:bb:cc:dd'))
        self.assertEqual(ret, 'update')
        # wrong keyfile
        ret = self.run_function('ssh.check_known_host', arg,
                                **dict(kwargs, key='YQ=='))
        self.assertEqual(ret, 'update')
    def test_check_known_host_exists(self):
        '''
        Verify check_known_host_exists
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'known_hosts'),
            self.known_hosts)
        arg = ['root', 'github.com']
        kwargs = {'config': self.known_hosts}
        # matching fingerprint
        ret = self.run_function('ssh.check_known_host', arg,
                                **dict(kwargs, fingerprint=GITHUB_FINGERPRINT))
        self.assertEqual(ret, 'exists')
        # matching key
        ret = self.run_function('ssh.check_known_host', arg,
                                **dict(kwargs, key=self.key))
        self.assertEqual(ret, 'exists')
    def test_rm_known_host(self):
        '''
        ssh.rm_known_host
        '''
        shutil.copyfile(
            os.path.join(RUNTIME_VARS.FILES, 'ssh', 'known_hosts'),
            self.known_hosts)
        arg = ['root', 'github.com']
        kwargs = {'config': self.known_hosts, 'key': self.key}
        # before removal
        ret = self.run_function('ssh.check_known_host', arg, **kwargs)
        self.assertEqual(ret, 'exists')
        # remove
        self.run_function('ssh.rm_known_host', arg, config=self.known_hosts)
        # after removal
        ret = self.run_function('ssh.check_known_host', arg, **kwargs)
        self.assertEqual(ret, 'add')
    def test_set_known_host(self):
        '''
        ssh.set_known_host
        '''
        # add item
        ret = self.run_function('ssh.set_known_host', ['root', 'github.com'],
                                config=self.known_hosts)
        try:
            self.assertEqual(ret['status'], 'updated')
            self.assertEqual(ret['old'], None)
            self.assertEqual(ret['new'][0]['fingerprint'], GITHUB_FINGERPRINT)
        except AssertionError as exc:
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
        # check that item does exist
        ret = self.run_function('ssh.get_known_host_entries', ['root', 'github.com'],
                                config=self.known_hosts)[0]
        try:
            self.assertEqual(ret['fingerprint'], GITHUB_FINGERPRINT)
        except AssertionError as exc:
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
        # add the same item once again
        ret = self.run_function('ssh.set_known_host', ['root', 'github.com'],
                                config=self.known_hosts)
        try:
            self.assertEqual(ret['status'], 'exists')
        except AssertionError as exc:
            raise AssertionError(
                'AssertionError: {0}. Function returned: {1}'.format(
                    exc, ret
                )
            )
| 36.716535 | 118 | 0.577204 |
b26e41ea6547663ef3b8200cd514d9483dbabf45 | 14,189 | py | Python | src/robotframework_ls/impl/ast_utils.py | Snooz82/robotframework-lsp | 5f6666968f59111a5c478afd54df055d23d7274c | [
"Apache-2.0"
] | null | null | null | src/robotframework_ls/impl/ast_utils.py | Snooz82/robotframework-lsp | 5f6666968f59111a5c478afd54df055d23d7274c | [
"Apache-2.0"
] | null | null | null | src/robotframework_ls/impl/ast_utils.py | Snooz82/robotframework-lsp | 5f6666968f59111a5c478afd54df055d23d7274c | [
"Apache-2.0"
] | null | null | null | import ast as ast_module
from robotframework_ls.lsp import Error
import sys
from collections import namedtuple
from robotframework_ls.robotframework_log import get_logger
log = get_logger(__name__)
class _NodesProviderVisitor(ast_module.NodeVisitor):
def __init__(self, on_node=lambda node: None):
ast_module.NodeVisitor.__init__(self)
self._stack = []
self.on_node = on_node
def generic_visit(self, node):
self._stack.append(node)
self.on_node(self._stack, node)
ast_module.NodeVisitor.generic_visit(self, node)
self._stack.pop()
class _PrinterVisitor(ast_module.NodeVisitor):
    """
    Writes a human-readable dump of the tree to the given stream: one line
    per node plus one line per token, indented by nesting depth, with the
    position info pushed towards column 80 via the *SPACING* placeholder.
    """
    def __init__(self, stream):
        ast_module.NodeVisitor.__init__(self)
        self._level = 0  # current nesting depth, drives the indentation
        self._stream = stream
    def _replace_spacing(self, txt):
        # Expand the "*SPACING*" placeholder with enough spaces to pad the
        # line to 80 characters (no padding at all for longer lines, since a
        # negative multiplier yields an empty string).
        curr_len = len(txt)
        delta = 80 - curr_len
        return txt.replace("*SPACING*", " " * delta)
    def generic_visit(self, node):
        # Note: prints line and col offsets 0-based (even if the ast is 1-based for
        # lines and 0-based for columns).
        self._level += 1
        try:
            indent = " " * self._level
            node_lineno = node.lineno
            if node_lineno != -1:
                # Make 0-based
                node_lineno -= 1
            node_end_lineno = node.end_lineno
            if node_end_lineno != -1:
                # Make 0-based
                node_end_lineno -= 1
            self._stream.write(
                self._replace_spacing(
                    "%s%s *SPACING* (%s, %s) -> (%s, %s)\n"
                    % (
                        indent,
                        node.__class__.__name__,
                        node_lineno,
                        node.col_offset,
                        node_end_lineno,
                        node.end_col_offset,
                    )
                )
            )
            # Nodes may also carry tokens (Robot Framework AST); dump each
            # one below its node, escaping newlines so one token == one line.
            tokens = getattr(node, "tokens", [])
            for token in tokens:
                token_lineno = token.lineno
                if token_lineno != -1:
                    # Make 0-based
                    token_lineno -= 1
                self._stream.write(
                    self._replace_spacing(
                        "%s- %s, '%s' *SPACING* (%s, %s->%s)\n"
                        % (
                            indent,
                            token.type,
                            token.value.replace("\n", "\\n").replace("\r", "\\r"),
                            token_lineno,
                            token.col_offset,
                            token.end_col_offset,
                        )
                    )
                )
            ret = ast_module.NodeVisitor.generic_visit(self, node)
        finally:
            # Always restore the depth, even if writing/visiting raised.
            self._level -= 1
        return ret
def collect_errors(node):
    """
    Collects one Error for each "Error" node found anywhere under `node`.

    :return list(Error)
    """
    return [
        create_error_from_node(error_node, error_node.error)
        for _stack, error_node in _iter_nodes_filtered(node, accept_class="Error")
    ]
def create_error_from_node(node, msg, tokens=None):
    """
    Builds an Error for `msg` spanning the given tokens (defaulting to the
    node's own tokens); falls back to position (0, 0) when there are none.

    :return Error:
    """
    if tokens is None:
        tokens = node.tokens
    if not tokens:
        log.log("No tokens found when visiting %s." % (node.__class__,))
        return Error(msg, (0, 0), (0, 0))
    first = tokens[0]
    last = tokens[-1]
    # Tokens use 1-based lines and 0-based columns; Error positions are
    # fully 0-based.
    return Error(
        msg,
        (first.lineno - 1, first.col_offset),
        (last.lineno - 1, last.end_col_offset),
    )
def print_ast(node, stream=None):
    """Dump `node` (including tokens) to `stream`, defaulting to stderr."""
    if stream is None:
        stream = sys.stderr
    _PrinterVisitor(stream).visit(node)
def find_section(node, line):
    """
    Returns the last section of `node` starting at or before `line`, or None
    when every section starts after it (or there are no sections).

    :param line:
        0-based
    """
    matched = None
    for candidate in node.sections:
        # candidate.lineno is 1-based.
        if (candidate.lineno - 1) > line:
            break
        matched = candidate
    return matched
def _iter_nodes(node, stack=None, recursive=True):
"""
:note: the yielded stack is actually always the same (mutable) list, so,
clients that want to return it somewhere else should create a copy.
"""
if stack is None:
stack = []
for _field, value in ast_module.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast_module.AST):
yield stack, item
if recursive:
stack.append(item)
for o in _iter_nodes(item, stack, recursive=recursive):
yield o
stack.pop()
elif isinstance(value, ast_module.AST):
if recursive:
yield stack, value
stack.append(value)
for o in _iter_nodes(value, stack, recursive=recursive):
yield o
stack.pop()
# A node plus its ancestry: `stack` is a tuple of ancestor nodes, outermost
# first, not including the node itself (see _iter_nodes).
_NodeInfo = namedtuple("_NodeInfo", "stack, node")
# Same as _NodeInfo plus the specific `token` found inside `node`.
_TokenInfo = namedtuple("_TokenInfo", "stack, node, token")
# A keyword usage site: the node/token at which a keyword `name` is used.
_KeywordUsageInfo = namedtuple("_KeywordUsageInfo", "stack, node, token, name")
def find_token(ast, line, col):
    """
    Finds the token that covers the given 0-based line/col, or None.

    :rtype: robotframework_ls.impl.ast_utils._TokenInfo|NoneType
    """
    for stack, node in _iter_nodes(ast):
        try:
            tokens = node.tokens
        except AttributeError:
            continue
        for token in tokens:
            if token.lineno - 1 != line:
                continue
            if token.type == token.SEPARATOR:
                # A separator only matches strictly inside its span, so that
                # a position exactly on the boundary of a word resolves to
                # the word rather than the separator.
                if token.col_offset < col < token.end_col_offset:
                    return _TokenInfo(tuple(stack), node, token)
            elif token.col_offset <= col <= token.end_col_offset:
                return _TokenInfo(tuple(stack), node, token)
def find_variable(ast, line, col):
    """
    Returns a _TokenInfo for the variable under the given 0-based position,
    or None when the position is not inside a variable of an ARGUMENT token.
    """
    token_info = find_token(ast, line, col)
    if token_info is None:
        return None
    token = token_info.token
    if token.type != token.ARGUMENT or "{" not in token.value:
        return None
    for part in _tokenize_variables_even_when_invalid(token, col):
        # Only the first part covering the position is considered.
        if part.col_offset <= col <= part.end_col_offset:
            if part.type == part.VARIABLE:
                return _TokenInfo(token_info.stack, token_info.node, part)
            return None
    return None
def tokenize_variables(token):
    """
    Returns an iterator over the variable tokens inside `token`, or an empty
    iterator when the token's contents cannot be tokenized (e.g. malformed
    variables).
    """
    try:
        return token.tokenize_variables()
    except Exception:
        # If variables aren't correct this may fail; behave as if there were
        # no variables at all. (Narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate.)
        return iter([])
def _tokenize_variables_even_when_invalid(token, col):
    """
    If Token.tokenize_variables() fails, this can still provide the variable under
    the given column by applying some heuristics to find open variables.
    """
    try:
        return token.tokenize_variables()
    except:
        pass
    # If we got here, it means that we weren't able to tokenize the variables
    # properly (probably some variable wasn't closed properly), so, let's do
    # a custom implementation for this use case.
    from robot.api import Token
    from robotframework_ls.impl.robot_constants import VARIABLE_PREFIXES
    # Position of the cursor relative to the start of this token's value.
    diff = col - token.col_offset
    up_to_cursor = token.value[:diff]
    open_at = up_to_cursor.rfind("{")
    if open_at >= 1:
        # Only treat it as a variable when the "{" is preceded by one of the
        # known variable prefix characters (see VARIABLE_PREFIXES).
        if up_to_cursor[open_at - 1] in VARIABLE_PREFIXES:
            varname = [up_to_cursor[open_at - 1 :]]
            from_cursor = token.value[diff:]
            # Extend the variable name with the text after the cursor,
            # stopping at anything that cannot be part of it (a new prefix,
            # whitespace or another "{"); a closing "}" is included.
            for c in from_cursor:
                if c in VARIABLE_PREFIXES or c.isspace() or c == "{":
                    break
                if c == "}":
                    varname.append(c)
                    break
                varname.append(c)
            return [
                Token(
                    type=token.VARIABLE,
                    value="".join(varname),
                    lineno=token.lineno,
                    col_offset=token.col_offset + open_at - 1,
                    error=token.error,
                )
            ]
    # Note: implicitly returns None when no open variable is found at `col`.
def _iter_nodes_filtered(ast, accept_class, recursive=True):
    """
    Like _iter_nodes, but only yields nodes whose class name matches
    `accept_class` (a single name or a collection of names).

    :rtype: generator(tuple(list,ast_module.AST))
    """
    if isinstance(accept_class, (list, tuple, set)):
        accepted_names = accept_class
    else:
        accepted_names = (accept_class,)
    for stack, node in _iter_nodes(ast, recursive=recursive):
        if node.__class__.__name__ in accepted_names:
            yield stack, node
def iter_library_imports(ast):
    """
    Yields one _NodeInfo per LibraryImport node found under `ast`.

    :rtype: generator(_NodeInfo)
    """
    for found_stack, found_node in _iter_nodes_filtered(
        ast, accept_class="LibraryImport"
    ):
        yield _NodeInfo(tuple(found_stack), found_node)
def iter_resource_imports(ast):
    """
    Yields one _NodeInfo per ResourceImport node found under `ast`.

    :rtype: generator(_NodeInfo)
    """
    for found_stack, found_node in _iter_nodes_filtered(
        ast, accept_class="ResourceImport"
    ):
        yield _NodeInfo(tuple(found_stack), found_node)
def iter_keywords(ast):
    """
    Yields one _NodeInfo per Keyword node found under `ast`.

    :rtype: generator(_NodeInfo)
    """
    for found_stack, found_node in _iter_nodes_filtered(ast, accept_class="Keyword"):
        yield _NodeInfo(tuple(found_stack), found_node)
def iter_variables(ast):
    """
    Yields one _NodeInfo per Variable node found under `ast`.

    :rtype: generator(_NodeInfo)
    """
    for found_stack, found_node in _iter_nodes_filtered(ast, accept_class="Variable"):
        yield _NodeInfo(tuple(found_stack), found_node)
def iter_keyword_arguments_as_str(ast):
    """
    Yields the string form of each ARGUMENT token of the Arguments nodes
    found under `ast`.

    :rtype: generator(str)
    """
    for _stack, arguments_node in _iter_nodes_filtered(ast, accept_class="Arguments"):
        for tok in arguments_node.tokens:
            if tok.type != tok.ARGUMENT:
                continue
            yield str(tok)
def get_documentation(ast):
    """
    Joins the stripped text of every Documentation ARGUMENT token found
    under `ast`, one line per token.
    """
    parts = []
    for _stack, doc_node in _iter_nodes_filtered(ast, accept_class="Documentation"):
        parts.extend(
            str(token).strip()
            for token in doc_node.tokens
            if token.type == token.ARGUMENT
        )
    return "\n".join(parts)
def iter_variable_assigns(ast):
    """
    Iterates over the assign targets of the KeywordCall nodes that are
    direct children of `ast` (non-recursive), yielding one _TokenInfo per
    ASSIGN token.

    The yielded token's value is truncated at the last "}" (dropping e.g. a
    trailing "=" assign marker); tokens without a "}" are skipped.
    """
    from robot.api import Token
    for stack, node in _iter_nodes(ast, recursive=False):
        if node.__class__.__name__ == "KeywordCall":
            for token in node.get_tokens(Token.ASSIGN):
                value = token.value
                i = value.rfind("}")
                if i > 0:
                    new_value = value[: i + 1]
                    # Rebuild the token with the truncated value, keeping the
                    # original position information.
                    token = Token(
                        type=token.type,
                        value=new_value,
                        lineno=token.lineno,
                        col_offset=token.col_offset,
                        error=token.error,
                    )
                    yield _TokenInfo(tuple(stack), node, token)
def iter_keyword_usage_tokens(ast):
    """
    Iterates through all the places where a keyword name is being used, providing
    the stack, node, token and name.

    Covers regular KeywordCall nodes as well as Fixture/TestTemplate
    settings; any BDD prefix is stripped from the yielded token and the node
    is replaced by a copy that holds the stripped token.

    :return: generator(_KeywordUsageInfo)
    :note: this goes hand-in-hand with get_keyword_name_token.
    """
    from robot.api import Token
    from robotframework_ls._utils import isinstance_name
    for stack, node in _iter_nodes(ast, recursive=True):
        if node.__class__.__name__ == "KeywordCall":
            token = _strip_token_bdd_prefix(node.get_token(Token.KEYWORD))
            node = _copy_of_node_replacing_token(node, token, Token.KEYWORD)
            keyword_name = token.value
            yield _KeywordUsageInfo(tuple(stack), node, token, keyword_name)
        elif isinstance_name(node, ("Fixture", "TestTemplate")):
            # Fixtures and templates reference the keyword via a NAME token.
            node, token = _strip_node_and_token_bdd_prefix(node, Token.NAME)
            keyword_name = token.value
            yield _KeywordUsageInfo(tuple(stack), node, token, keyword_name)
def get_keyword_name_token(ast, token):
    """Return the (BDD-prefix-stripped) token if it names a keyword, else None.

    :note: this goes hand-in-hand with iter_keyword_usage_tokens.
    """
    from robotframework_ls._utils import isinstance_name

    if token.type == token.KEYWORD:
        return _strip_token_bdd_prefix(token)
    if token.type == token.NAME and isinstance_name(ast, ("Fixture", "TestTemplate")):
        return _strip_token_bdd_prefix(token)
    return None
def _copy_of_node_replacing_token(node, token, token_type):
"""
Workaround to create a new version of the same node but with the first
occurrence of a token of the given type changed to another token.
"""
new_tokens = list(node.tokens)
for i, t in enumerate(new_tokens):
if t.type == token_type:
new_tokens[i] = token
break
return node.__class__(new_tokens)
def _strip_node_and_token_bdd_prefix(node, token_type):
    """
    This is a workaround because the parsing does not separate a BDD prefix from
    the keyword name. If the parsing is improved to do that separation in the future
    we can stop doing this.
    """
    original = node.get_token(token_type)
    stripped = _strip_token_bdd_prefix(original)
    if stripped is not original:
        # A prefix was removed: return a copy of the node carrying the
        # stripped token in place of the original one.
        node = _copy_of_node_replacing_token(node, stripped, token_type)
    return node, stripped
def _strip_token_bdd_prefix(token):
    """
    This is a workaround because the parsing does not separate a BDD prefix from
    the keyword name. If the parsing is improved to do that separation in the future
    we can stop doing this.

    :return Token:
        Returns a new token with the bdd prefix stripped or the original token passed.
    """
    from robotframework_ls.impl.robot_constants import BDD_PREFIXES
    from robot.api import Token
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    # Matching is done on the normalized name so casing does not matter.
    text = normalize_robot_name(token.value)
    for prefix in BDD_PREFIXES:
        if text.startswith(prefix):
            # NOTE(review): slicing token.value by len(prefix) assumes that
            # normalize_robot_name() preserves character positions over the
            # prefix region (i.e. only changes case) — confirm against
            # text_utilities before relying on col_offset below.
            new_name = token.value[len(prefix) :]
            return Token(
                type=token.type,
                value=new_name,
                lineno=token.lineno,
                col_offset=token.col_offset + len(prefix),
                error=token.error,
            )

    return token
| 31.885393 | 86 | 0.58045 |
de1d858ce6ceb0afac552cf2587e2a247d876af8 | 1,015 | py | Python | stocks_api/__init__.py | IndigoShock/stocks_api | 8d0b1fea8fc9637d9580e0f69d9a6585c04472d8 | [
"MIT"
] | null | null | null | stocks_api/__init__.py | IndigoShock/stocks_api | 8d0b1fea8fc9637d9580e0f69d9a6585c04472d8 | [
"MIT"
] | null | null | null | stocks_api/__init__.py | IndigoShock/stocks_api | 8d0b1fea8fc9637d9580e0f69d9a6585c04472d8 | [
"MIT"
] | null | null | null | from pyramid.config import Configurator
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Allow, ALL_PERMISSIONS
class RootACL:
    """Root resource factory defining the app-wide ACL:
    the 'admin' principal gets all permissions, 'view' may only read."""

    __acl__ = [
        (Allow, 'admin', ALL_PERMISSIONS),
        (Allow, 'view', ['read']),
    ]

    def __init__(self, request):
        # Pyramid instantiates the root factory with the request;
        # nothing from it is needed here.
        pass
def add_role_principles(userid, request):
    """JWT callback: return the request's 'roles' claim (empty list if absent)."""
    claims = request.jwt_claims
    return claims.get('roles', [])
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    config.include('pyramid_restful')
    config.include('pyramid_jwt')
    # Root factory + ACL policy give role-based authorization (see RootACL).
    config.set_root_factory(RootACL)
    config.set_authorization_policy(ACLAuthorizationPolicy())
    # SECURITY(review): the JWT signing secret is hard-coded; move it to
    # configuration/environment (per the inline hint) before production use.
    config.set_jwt_authentication_policy(
        'superseekretseekrit',  # os.environ.get('SECRET', None)
        auth_type='Bearer',
        callback=add_role_principles
    )
    config.include('.models')
    config.include('.routes')
    config.scan()
    return config.make_wsgi_app()
| 27.432432 | 64 | 0.698522 |
5e56f3f582abfff11a2e752521ca54038cadfaea | 312 | py | Python | 002/inout.py | sauhor/miniprocon | 841ffcc50f7955cba9f9dd45a405d55928a61fec | [
"MIT"
] | null | null | null | 002/inout.py | sauhor/miniprocon | 841ffcc50f7955cba9f9dd45a405d55928a61fec | [
"MIT"
] | null | null | null | 002/inout.py | sauhor/miniprocon | 841ffcc50f7955cba9f9dd45a405d55928a61fec | [
"MIT"
] | null | null | null | # inout.py
#coding: utf-8
import sys
from solve import solve
argvs=sys.argv
argc=len(argvs)
if argc >= 2:
f=open(argvs[1],"r")
else:
f=open("problem.txt","r")
noshi=[line.strip('\n') for line in f]
spam=noshi[0].split(' ')
n=spam[0]
m=spam[1]
noshimochi=noshi[1:]
print solve(int(n),int(m),noshimochi)
| 16.421053 | 38 | 0.657051 |
a072403754baa0e13a154ef04a852ecffdb553db | 2,719 | py | Python | projects/migrations/0001_initial.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.24 on 2021-07-17 12:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates Project, Rating and Profile.

    Auto-generated by Django (`makemigrations`); do not hand-edit the
    operations — create a follow-up migration for schema changes instead.
    """

    initial = True

    dependencies = [
        # Needs the user model to exist for the ForeignKey/OneToOne fields.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='landing_page')),
                ('title', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('link', models.CharField(max_length=100)),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 1-10 score fields for the three rating dimensions.
                ('design', models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
                ('usability', models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
                ('content', models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=0)),
                ('overall_score', models.IntegerField(blank=True, default=0)),
                ('comment', models.TextField()),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_pic', models.ImageField(default='anonymous.png', upload_to='profile_pics/')),
                ('bio', models.TextField()),
                ('location', models.CharField(blank=True, max_length=60)),
                ('contact', models.CharField(blank=True, max_length=10)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 50.351852 | 166 | 0.568959 |
37cc1be23a9035756296bad2fb4f0904b191e62f | 5,133 | py | Python | ezo/tests/test_ezo.py | robinagist/ezo | fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986 | [
"MIT"
] | 6 | 2018-06-17T13:11:25.000Z | 2021-02-22T08:54:31.000Z | ezo/tests/test_ezo.py | robinagist/ezo | fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986 | [
"MIT"
] | 53 | 2018-05-31T06:43:27.000Z | 2021-06-11T17:45:29.000Z | ezo/tests/test_ezo.py | robinagist/ezo | fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986 | [
"MIT"
] | null | null | null |
from cli.ezo_cli import EZOApp
from core.lib import EZO
import pytest
'''
Ezo full integration test
'''
class EZOTestApp(EZOApp):
    """EZOApp subclass for tests: starts with empty argv and no config files,
    so each test passes its own command line and config explicitly."""
    class Meta:
        argv = []
        config_files = []
### compiles and deploys
def test_01_ezo_compile_contract(capsys):
    """Compiling time_oracle.sol with --overwrite reports a CONTRACT entry."""
    argv = ['compile', 'time_oracle.sol', '--overwrite']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'CONTRACT' in captured.err
def test_01a_ezo_compile_contract_no_overwrite_with_error(capsys):
    """Recompiling without --overwrite must fail with a persistence error."""
    argv = ['compile', 'time_oracle.sol']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'already exists' in captured.err
        assert 'error while persisting Contract to datastore' in captured.err
def test_01a_ezo_compile_contract_no_contract_by_filename_with_error(capsys):
    """Compiling a nonexistent source file must surface the OS error."""
    argv = ['compile', 'throatwobbler_mangrove.sol']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'No such file or directory' in captured.err
def test_02_ezo_deploy_contract_no_overwrite_with_error(capsys):
    """Re-deploying to the same target must report an existing deployment."""
    argv = ['deploy', 'TimestampRequestOracle', '-t', 'test']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'deployment on test already exists for contract ' in captured.err
def test_02a_ezo_deploy_bad_contract_name_with_error(capsys):
    """Deploying an unknown contract name must hint that it was not compiled."""
    argv = ['deploy', 'BadContractNameLtd', '-t', 'test']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'not found -- has it been compiled' in captured.err
def test_02b_ezo_deploy_contract_missing_target_with_error(capsys):
    """Deploying without -t must demand that a target be set."""
    argv = ['deploy', 'BadContractNameLtd']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'target must be set with the -t option before deploying' in captured.err
@pytest.mark.skip
def test_02c_ezo_deploy_contract_bad_target_name_with_error(capsys):
    """Deploy to a nonexistent target name (skipped: expected message TBD)."""
    argv = ['deploy', 'TimestampRequestOracle', '-t', 'veryNaughtTargetName']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert '' in captured.err
### views and files
def test_f_ezo_deploy(capsys):
    """'view deploys' must list deployment records on stdout."""
    argv = ['view', 'deploys']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'deploy' in captured.out
def test_g_ezo_view_contracts(capsys):
    """'view contracts' must list contract records on stdout."""
    argv = ['view', 'contracts']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'contract' in captured.out
### missing commands tests
def test_g_ezo_view_missing_command(capsys):
    """A bare 'view' must print the more-words usage hint."""
    argv = ['view']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'Ezo needs more words to work' in captured.out
def test_g_ezo_create_missing_command(capsys):
    """A bare 'create' must print the more-words usage hint."""
    argv = ['create']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'Ezo needs more words to work' in captured.out
def test_g_ezo_create_gibberish_command(capsys):
    """'create' with an unknown subcommand must print the usage hint."""
    argv = ['create', 'bigdufus']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'Ezo needs more words to work' in captured.out
def test_g_ezo_send_missing_command(capsys):
    """A bare 'send' must print the more-words usage hint."""
    argv = ['send']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'Ezo needs more words to work' in captured.out
def test_g_ezo_send_tx_missing_target_and_missing_params(capsys):
    """'send tx' without -t must demand a target first."""
    argv = ['send', 'tx']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'target must be set with the -t option before deploying' in captured.err
def test_g_ezo_send_tx_missing_params(capsys):
    """'send tx' with a target but no params must report missing parameters."""
    argv = ['send', 'tx', '-t', 'test']
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'missing parameters for send tx' in captured.err
def test_g_ezo_missing_command(capsys):
    """Running ezo with no arguments at all must print the usage hint."""
    argv = []
    with EZOTestApp(argv=argv, config_files=['testezo.conf']) as app:
        app.ezo = EZO(app.config["ezo"])
        app.run()
        captured = capsys.readouterr()
        assert 'Ezo needs more words to work' in captured.out
| 33.993377 | 131 | 0.653419 |
82b5fcd7828d1faccfae99477f8a44449ca4c9c1 | 2,811 | py | Python | working_example/python/hello_serverless/hello_serverless/hello_serverless_stack.py | darko-mesaros/workshop-serverless-with-cdk | bbfd30de43d01251565c019a8ac259706bd6f1d0 | [
"MIT"
] | 33 | 2020-08-12T08:08:08.000Z | 2022-03-20T20:32:18.000Z | working_example/python/hello_serverless/hello_serverless/hello_serverless_stack.py | darko-mesaros/workshop-serverless-with-cdk | bbfd30de43d01251565c019a8ac259706bd6f1d0 | [
"MIT"
] | 2 | 2020-08-12T09:54:53.000Z | 2020-08-12T13:37:22.000Z | working_example/python/hello_serverless/hello_serverless/hello_serverless_stack.py | darko-mesaros/workshop-serverless-with-cdk | bbfd30de43d01251565c019a8ac259706bd6f1d0 | [
"MIT"
] | 17 | 2020-08-12T08:09:46.000Z | 2021-07-18T19:52:50.000Z | from aws_cdk import (
aws_lambda as _lambda,
aws_apigateway as apigw,
aws_dynamodb as dynamodb,
core
)
class HelloServerlessStack(core.Stack):
    """CDK stack: a DynamoDB table plus three Lambdas behind API Gateway
    (GET /hello, POST /create, GET /read), sized per environment."""

    def __init__(self, scope: core.Construct, id: str, prod: bool, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        # Environment sizing: prod gets bigger capacity; names are prefixed.
        if prod:
            dynamodb_read_write_cap = 200
            api_gw_name = "PROD_CDK_API"
            table_name = "PROD_CDK_USERS"
            concurrency = 100
        else:
            dynamodb_read_write_cap = 5
            api_gw_name = "STAGE_CDK_API"
            table_name = "STAGE_CDK_USERS"
            concurrency = 5
        # NOTE(review): `concurrency` and `api_gw_name` are computed but never
        # used below; also only read_capacity is set despite the variable's
        # "read_write" name — confirm whether write capacity was intended.

        # --- dynamodb ---
        table = dynamodb.Table(
            self, "people",
            partition_key = dynamodb.Attribute(name="name", type=dynamodb.AttributeType.STRING),
            table_name = table_name,
            read_capacity = dynamodb_read_write_cap,
            billing_mode = dynamodb.BillingMode.PROVISIONED,
        )

        # --- api gateway ---
        api = apigw.RestApi(self, "the_api")

        # --- lambdas ---
        ## Hello World Lambda
        hello_lambda = _lambda.Function(
            self, "hello_lambda",
            runtime = _lambda.Runtime.PYTHON_3_8,
            code= _lambda.Code.from_asset("lambda"),
            handler = "hello.handler",
            environment={"table":table.table_name},
        )
        hello_integ = apigw.LambdaIntegration(hello_lambda)
        api_hello = api.root.add_resource("hello")
        api_hello.add_method("GET", hello_integ)

        ## Create User Lambda
        create_lambda = _lambda.Function(
            self, "create_lambda",
            runtime = _lambda.Runtime.PYTHON_3_8,
            code= _lambda.Code.from_asset("lambda"),
            handler = "create.handler",
            environment={"table":table.table_name},
        )
        create_integ = apigw.LambdaIntegration(create_lambda)
        api_create = api.root.add_resource("create")
        api_create.add_method("POST", create_integ)

        ## Read User Lambda
        read_lambda = _lambda.Function(
            self, "read_lambda",
            runtime = _lambda.Runtime.PYTHON_3_8,
            code= _lambda.Code.from_asset("lambda"),
            handler = "read.handler",
            environment={"table":table.table_name},
        )
        read_integ = apigw.LambdaIntegration(read_lambda)
        api_read = api.root.add_resource("read")
        api_read.add_method("GET", read_integ)

        # --- table permissions ---
        # Least privilege: reader is read-only, creator may read and write.
        table.grant_read_data(read_lambda)
        table.grant_read_write_data(create_lambda)
| 36.986842 | 100 | 0.566702 |
2f6d6395b4f58fd85e8d0c680c7895d49a4fa92e | 13,305 | py | Python | keras_retinanet/bin/train_com.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/bin/train_com.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/bin/train_com.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import keras_retinanet.bin.gpu_setting
keras_retinanet.bin.gpu_setting.set_gpu()
import argparse
import os
import sys
import keras
import keras.preprocessing.image
from keras.utils import multi_gpu_model
import tensorflow as tf
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import losses
from .. import layers
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..utils.transform import random_transform_generator
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
def get_session():
    """Return a TensorFlow session that allocates GPU memory on demand."""
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    return tf.Session(config=tf_config)
def model_with_weights(model, weights, skip_mismatch):
    """Load *weights* (a filepath) into *model* by layer name, if given.

    Returns the model either way; a None weights path is a no-op.
    """
    if weights is None:
        return model
    model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
    return model
def create_models(backbone_retinanet, backbone, num_classes, weights, multi_gpu=0, freeze_backbone=False):
    """Build and compile the RetinaNet models.

    Returns (model, training_model, prediction_model): `model` owns the
    weights, `training_model` is what gets fitted (a multi-GPU wrapper when
    multi_gpu > 1), and `prediction_model` additionally runs NMS.
    """
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        with tf.device('/cpu:0'):
            # nms=False here: NMS is appended manually below, prediction-only.
            model = model_with_weights(backbone_retinanet(num_classes, backbone=backbone, nms=False, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)

        # append NMS for prediction only
        classification = model.outputs[1]
        detections = model.outputs[2]
        boxes = keras.layers.Lambda(lambda x: x[:, :, :4])(detections)
        detections = layers.NonMaximumSuppression(name='nms')([boxes, classification, detections])
        prediction_model = keras.models.Model(inputs=model.inputs, outputs=model.outputs[:2] + [detections])
    else:
        # Single device: one model serves both training and prediction.
        model = model_with_weights(backbone_retinanet(num_classes, backbone=backbone, nms=True, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model
        prediction_model = model

    # compile model
    training_model.compile(
        loss={
            'regression' : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
    """Build the list of Keras callbacks for training.

    Depending on args this includes: snapshot checkpointing (saving the
    prediction model), TensorBoard logging, per-epoch evaluation, and
    learning-rate reduction on loss plateau.
    """
    callbacks = []

    # save the prediction model
    if args.snapshots:
        # ensure directory created first; otherwise h5py will error after epoch.
        os.makedirs(args.snapshot_path, exist_ok=True)
        checkpoint = keras.callbacks.ModelCheckpoint(
            os.path.join(
                args.snapshot_path,
                '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)
            ),
            verbose=1,
            monitor='loss',
            save_best_only=True,
            mode='min',
        )
        # RedirectModel makes the checkpoint save prediction_model rather
        # than the (possibly multi-GPU wrapped) training model.
        checkpoint = RedirectModel(checkpoint, prediction_model)
        callbacks.append(checkpoint)

    tensorboard_callback = None

    if args.tensorboard_dir:
        tensorboard_callback = keras.callbacks.TensorBoard(
            log_dir                = args.tensorboard_dir,
            histogram_freq         = 0,
            batch_size             = args.batch_size,
            write_graph            = True,
            write_grads            = False,
            write_images           = False,
            embeddings_freq        = 0,
            embeddings_layer_names = None,
            embeddings_metadata    = None
        )
        callbacks.append(tensorboard_callback)

    if args.evaluation and validation_generator:
        if args.dataset_type == 'coco':
            from ..callbacks.coco import CocoEval

            # use prediction model for evaluation
            evaluation = CocoEval(validation_generator)
        else:
            evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback)
        evaluation = RedirectModel(evaluation, prediction_model)
        callbacks.append(evaluation)

    # NOTE(review): `epsilon` was renamed to `min_delta` in newer Keras
    # releases — confirm against the pinned Keras version before upgrading.
    callbacks.append(keras.callbacks.ReduceLROnPlateau(
        monitor  = 'loss',
        factor   = 0.1,
        patience = 2,
        verbose  = 1,
        mode     = 'auto',
        epsilon  = 0.0001,
        cooldown = 0,
        min_lr   = 0
    ))

    return callbacks
def create_generators(args):
    """Create the (train, validation) data generators for args.dataset_type.

    Currently only 'pascal' is handled; any other type falls through and the
    function implicitly returns None (unreachable via parse_args, which only
    registers the 'pascal' subparser).
    """
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            transform_generator=transform_generator,
            batch_size=args.batch_size
        )

        validation_generator = PascalVocGenerator(
            args.pascal_path,
            'test',
            batch_size=args.batch_size
        )

        return train_generator, validation_generator
def check_args(parsed_args):
    """
    Function to check for inherent contradictions within parsed arguments.
    For example, batch_size < num_gpus
    Intended to raise errors prior to backend initialisation.

    :param parsed_args: parser.parse_args()
    :return: parsed_args
    """
    # Each GPU needs at least one sample per batch.
    if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:
        raise ValueError(
            "Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
                                                                                             parsed_args.multi_gpu))

    if parsed_args.multi_gpu > 1 and parsed_args.snapshot:
        raise ValueError(
            "Multi GPU training ({}) and resuming from snapshots ({}) is not supported.".format(parsed_args.multi_gpu,
                                                                                                parsed_args.snapshot))

    if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:
        raise ValueError("Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.")

    # Import the matching backbone validator and let it reject unknown variants.
    if 'resnet' in parsed_args.backbone:
        from ..models.resnet import validate_backbone
    elif 'mobilenet' in parsed_args.backbone:
        from ..models.mobilenet import validate_backbone
    else:
        raise NotImplementedError('Backbone \'{}\' not implemented.'.format(parsed_args.backbone))

    validate_backbone(parsed_args.backbone)

    return parsed_args
def parse_args(args):
    """Parse and validate the training command line.

    Only the 'pascal' dataset subcommand is registered; the result is run
    through check_args() before being returned.
    """
    parser     = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    pascal_parser = subparsers.add_parser('pascal')
    # NOTE(review): `default=` on a required positional argument has no
    # effect — the user must always supply pascal_path (use nargs='?' if an
    # optional path with a default is actually intended).
    # pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).', default='/home/syh/datasets/VOCdevkit/VOC2012' )
    pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).', default='/home/syh/RetinaNet/data/train' )
    # pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')

    # Weight initialisation options are mutually exclusive; imagenet is the default.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--snapshot',          help='Resume training from a snapshot.')
    group.add_argument('--imagenet-weights',  help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
    group.add_argument('--weights',           help='Initialize the model with weights from a file.')
    group.add_argument('--no-weights',        help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)

    parser.add_argument('--backbone',        help='Backbone model used by retinanet.', default='resnet101', type=str)
    parser.add_argument('--batch-size',      help='Size of the batches.', default=2, type=int)
    parser.add_argument('--gpu',             help='Id of the GPU to use (as reported by nvidia-smi).')
    parser.add_argument('--multi-gpu',       help='Number of GPUs to use for parallel processing.', type=int, default=0)
    parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
    parser.add_argument('--epochs',          help='Number of epochs to train.', type=int, default=200)
    parser.add_argument('--steps',           help='Number of steps per epoch.', type=int, default=3000)
    parser.add_argument('--snapshot-path',   help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='/disk2/train/snapshots')
    parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
    parser.add_argument('--no-snapshots',    help='Disable saving snapshots.', dest='snapshots', action='store_false')
    parser.add_argument('--no-evaluation',   help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
    parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')

    return check_args(parser.parse_args(args))
def data_info(train_generator, validation_generator):
    '''
    Print a short summary (size and class count) of the train and test sets.
    :param train_generator:
    :param validation_generator:
    :return:
    '''
    print( '---------------------------------------------------' )
    for label, generator in (('train', train_generator), ('test', validation_generator)):
        print(label + ' images size:', generator.size())
        print(label + ' num classes:', generator.num_classes())
def main(args=None):
    """Entry point: parse args, build data generators and models, then train.

    :param args: argv-style list; defaults to sys.argv[1:].
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    # os.environ['CUDA_VISIBLE_DEVICES'] = ""
    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # os.environ['CUDA_VISIBLE_DEVICES'] = ""
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args)
    data_info(train_generator, validation_generator)

    # Pick the backbone-specific retinanet builder and helpers.
    if 'resnet' in args.backbone:
        from ..models.resnet import resnet_retinanet as retinanet, custom_objects, download_imagenet
    elif 'mobilenet' in args.backbone:
        from ..models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
    else:
        raise NotImplementedError('Backbone \'{}\' not implemented.'.format(args.backbone))

    # create the model
    if args.snapshot is not None:
        # Resume: the snapshot already contains architecture + weights.
        print(args.snapshot)
        print('Loading model, this may take a second...')
        model            = keras.models.load_model(args.snapshot, custom_objects=custom_objects)
        training_model   = model
        prediction_model = model
    else:
        weights = args.weights
        # default to imagenet if nothing else is specified
        if weights is None and args.imagenet_weights:
            weights = download_imagenet(args.backbone)

        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=retinanet,
            backbone=args.backbone,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=args.multi_gpu,
            freeze_backbone=args.freeze_backbone
        )

    # print model summary
    # print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        training_model,
        prediction_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
if __name__ == '__main__':
main()
| 39.954955 | 187 | 0.672905 |
0d9d187a5c96b509aed8671ca1f4460cfbe61cff | 2,108 | py | Python | nylon/supplementaries/main.py | caiogasparine/nylon | af3413932216a3ca76afaff7820d9211b7f3b7ef | [
"MIT"
] | 1 | 2022-01-27T05:33:27.000Z | 2022-01-27T05:33:27.000Z | nylon/supplementaries/main.py | caiogasparine/nylon | af3413932216a3ca76afaff7820d9211b7f3b7ef | [
"MIT"
] | null | null | null | nylon/supplementaries/main.py | caiogasparine/nylon | af3413932216a3ca76afaff7820d9211b7f3b7ef | [
"MIT"
] | null | null | null | from nylon.data.reader import DataReader
import json
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
from sklearn.impute import SimpleImputer
import shutil
import sys
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import (OneHotEncoder,
StandardScaler,
LabelEncoder)
from importlib import import_module
import os
# Lookup tables mapping the JSON-spec vocabulary to sklearn callables.
preprocess_vocab = {'one-hot': OneHotEncoder, 'label-encode': LabelEncoder, 'fill': SimpleImputer,
                    'scale': StandardScaler}
# NOTE(review): modeling_vocab / analysis_vocab are not referenced in this
# module; presumably consumed elsewhere in the package — confirm before removing.
modeling_vocab = {'linear': LinearRegression}
analysis_vocab = {'cross-val': cross_val_score, 'acc-score': accuracy_score}
def read_json(json_file):
    """Parse the JSON file at *json_file* and return the decoded object."""
    # JSON is UTF-8 by specification; be explicit so parsing does not depend
    # on the platform's default locale encoding.
    with open(json_file, encoding="utf-8") as f:
        return json.load(f)
def import_from(module, name):
    """Import dotted-path *module* and return its attribute *name*."""
    imported = __import__(module, fromlist=[name])
    return getattr(imported, name)
def dataset_initializer(request_info):
    """Resolve request_info['df'] into a loaded dataframe.

    request_info carries 'df' (dataset path/object) and 'json' (a spec dict
    or a path to a JSON spec).  With a data.custom entry, the user-supplied
    loader module is copied into the buffer package, imported and invoked;
    otherwise DataReader performs the load.  The dataframe and parsed spec
    are written back into request_info, which is returned.

    Raises Exception when neither 'custom' nor 'target' is present in the
    spec's data section.
    """
    dataset = request_info['df']
    json_file_path = request_info['json']

    # The spec may arrive pre-parsed (dict) or as a path to a JSON file.
    if isinstance(json_file_path, str):
        json_file = read_json(json_file_path)
    else:
        json_file = json_file_path

    if "custom" not in json_file['data']:
        if "target" not in json_file['data']:
            raise Exception("A target column has to specified under the -- target -- keyword.")
        df = DataReader(json_file, dataset)
        df = df.data_reader()
    else:
        # Copy the user's loader into the importable buffer directory,
        # import it under a temporary name, and call the named entry point.
        buffer_dir = os.getcwd() + "/nylon/supplementaries/buffer/"
        sys.path.insert(1, buffer_dir)
        absolute_path = os.path.join(buffer_dir, 'temp.py')

        shutil.copy(json_file['data']['custom']['loc'], absolute_path)

        mod = import_module('temp')
        new_func = getattr(mod, json_file['data']['custom']['name'])
        df = new_func(json_file)

        # Clean up exactly what was added above.  (The previous code removed
        # the bare relative path — which was never on sys.path, raising
        # ValueError — and deleted "./buffer/temp.py" instead of the file it
        # actually wrote.)
        sys.path.remove(buffer_dir)
        os.remove(absolute_path)

    request_info['df'] = df
    request_info['json'] = json_file
    return request_info
| 31.939394 | 98 | 0.665085 |
0b77d8e3c4af70b67f4316de05be04db80e3a162 | 884 | py | Python | NeuroDash/scripts/app.py | brainhack-school2020/maellef_IDK_project | ac2aec66b7c6ec557716269ab9aaab97905b694c | [
"MIT"
] | null | null | null | NeuroDash/scripts/app.py | brainhack-school2020/maellef_IDK_project | ac2aec66b7c6ec557716269ab9aaab97905b694c | [
"MIT"
] | 2 | 2020-05-20T20:05:39.000Z | 2020-05-26T19:52:58.000Z | NeuroDash/scripts/app.py | brainhack-school2020/maellef_IDK_project | ac2aec66b7c6ec557716269ab9aaab97905b694c | [
"MIT"
] | null | null | null | import dash
import dash_html_components as html
import os
import bids_reader as br
import dash_utils as du
def show_bids_layout(input_value):
    """Render the BIDS layout of the dataset at *input_value*, or an error
    message when the path does not exist."""
    if os.path.lexists(input_value):
        return '{}'.format(br.read_layout_from_dataset(input_value))
    return 'this path isn\'t correct. Please enter a real path.'
#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__)#, external_stylesheets=external_stylesheets)
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
app.layout = html.Div(children=[
html.H1(children='BidsDash'),
html.Div(children='''
help visualise the content of a BIDS dataset
''')])
du.input_text(app, show_bids_layout,
my_id='my-id', my_div='my-div',
default_text='enter a absolute path')
if __name__ == '__main__':
app.run_server(debug=True) | 25.257143 | 70 | 0.687783 |
b7815e2751a39aa2ce678f9f73ead3112221bacb | 774 | py | Python | examples/neural.py | HelloChatterbox/simple_NER | 678f0003030c9158583122e8b818a3b4e4ba3ea1 | [
"MIT"
] | 25 | 2019-12-26T14:10:47.000Z | 2022-03-16T02:17:16.000Z | examples/neural.py | HelloChatterbox/simple_NER | 678f0003030c9158583122e8b818a3b4e4ba3ea1 | [
"MIT"
] | null | null | null | examples/neural.py | HelloChatterbox/simple_NER | 678f0003030c9158583122e8b818a3b4e4ba3ea1 | [
"MIT"
] | 5 | 2020-08-16T16:38:09.000Z | 2022-03-21T16:59:16.000Z | from simple_NER.rules.neural import NeuralNER
ner = NeuralNER()
ner.add_rule("name", "my name is {person}")
for ent in ner.extract_entities("the name is jarbas"):
assert ent.as_json()["value"] == 'jarbas'
for ent in ner.extract_entities("name is kevin"):
# {'confidence': 0.8363423970007801,
# 'data': {},
# 'entity_type': 'person',
# 'rules': [{'name': 'name',
# 'rules': ['my name is {person}']}],
# 'source_text': 'name is kevin',
# 'spans': [(8, 13)],
# 'value': 'kevin'}
assert ent.as_json()["value"] == 'kevin'
| 40.736842 | 81 | 0.431525 |
6b1e2cddb53f6078fbe564d7ef4c9254656af189 | 418 | py | Python | 01_Natural_Language_Processing/cha01/show_spiral_dataset.py | ROY-SNN/NLP_GOGO | 89e6f98335c0b160c275f8a3752639727fc7439a | [
"Apache-2.0"
] | 2 | 2021-05-23T09:35:07.000Z | 2021-12-27T05:16:52.000Z | 01_Natural_Language_Processing/cha01/show_spiral_dataset.py | ROY-SNN/NLP_GOGO | 89e6f98335c0b160c275f8a3752639727fc7439a | [
"Apache-2.0"
] | null | null | null | 01_Natural_Language_Processing/cha01/show_spiral_dataset.py | ROY-SNN/NLP_GOGO | 89e6f98335c0b160c275f8a3752639727fc7439a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Visualise the toy spiral classification dataset: 3 interleaved classes,
# 100 points each, plotted with a distinct marker per class.
import sys
sys.path.append('..')  # make the parent directory importable (dataset lives there)
from Natural_Language_Processing.dataset import spiral
import matplotlib.pyplot as plt
x, t = spiral.load_data()
print('x', x.shape)  # (300, 2) -- 2-D coordinates
print('t', t.shape)  # (300, 3) -- one-hot class labels
# plot the data points, one marker style per class
N = 100
CLS_NUM = 3
markers = ['o', 'x', '^']
for i in range(CLS_NUM):
    plt.scatter(x[i*N:(i+1)*N, 0], x[i*N:(i+1)*N, 1], s=40, marker=markers[i])
plt.show()
| 22 | 78 | 0.636364 |
fa9676138276feb6126ca746f385cc233d16d6f5 | 773 | py | Python | source/pkgsrc/editors/medit/patches/patch-tools_xml2h.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | 1 | 2021-11-20T22:46:39.000Z | 2021-11-20T22:46:39.000Z | source/pkgsrc/editors/medit/patches/patch-tools_xml2h.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | null | null | null | source/pkgsrc/editors/medit/patches/patch-tools_xml2h.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | null | null | null | $NetBSD: patch-tools_xml2h.py,v 1.1 2021/04/11 00:58:16 tsutsui Exp $
- mechanical python3 support
--- tools/xml2h.py.orig 2012-12-16 23:55:32.000000000 +0000
+++ tools/xml2h.py
@@ -11,13 +11,13 @@ tmp_output = output + '.tmp'
varname = sys.argv[3]
outfile = open(tmp_output, 'w')
-print >> outfile, '/* -*- C -*- */'
-print >> outfile, 'static const char %s [] = ""' % (varname,)
+print('/* -*- C -*- */', file=outfile)
+print('static const char %s [] = ""' % (varname,), file=outfile)
for line in open(input):
if line.endswith('\n'):
line = line[:-1]
- print >> outfile, '"' + line.replace('"', '\\"') + '\\n"'
-print >> outfile, ';'
+ print('"' + line.replace('"', '\\"') + '\\n"', file=outfile)
+print(';', file=outfile)
outfile.close()
| 30.92 | 69 | 0.558862 |
8e2ce3426dfcaf0925aefdffb9e41b1725a04c54 | 8,514 | py | Python | packages/python/plotly/plotly/tests/test_core/test_figure_messages/test_on_change.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | packages/python/plotly/plotly/tests/test_core/test_figure_messages/test_on_change.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | packages/python/plotly/plotly/tests/test_core/test_figure_messages/test_on_change.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import sys
from unittest import TestCase
import pytest
import plotly.graph_objs as go
if sys.version_info >= (3, 3):
from unittest.mock import MagicMock
else:
from mock import MagicMock
class TestOnChangeCallbacks(TestCase):
    """Tests for figure/trace/layout ``on_change`` callback registration.

    Fix: ``test_validate_property_path_nested`` was defined twice; the first
    definition (flat bogus property on xaxis) was silently shadowed and never
    executed.  The first is renamed ``test_validate_property_path`` so both run.
    """

    def setUp(self):
        # Construct initial scatter object
        self.figure = go.Figure(
            data=[
                go.Scatter(y=[3, 2, 1], marker={"color": "green"}),
                go.Bar(y=[3, 2, 1, 0, -1], marker={"opacity": 0.5}),
            ],
            layout={"xaxis": {"range": [-1, 4]}, "width": 1000},
            frames=[go.Frame(layout={"yaxis": {"title": "f1"}})],
        )

    # on_change validation
    # --------------------
    def test_raise_if_no_figure(self):
        scatt = go.Scatter()
        fn = MagicMock()
        with pytest.raises(ValueError):
            scatt.on_change(fn, "x")

    def test_raise_on_frame_hierarchy(self):
        fn = MagicMock()
        with pytest.raises(ValueError):
            self.figure.frames[0].layout.xaxis.on_change(fn, "range")

    def test_validate_property_path(self):
        # Bogus flat property name on a nested object must be rejected
        fn = MagicMock()
        with pytest.raises(ValueError):
            self.figure.layout.xaxis.on_change(fn, "bogus")

    def test_validate_property_path_nested(self):
        # Bogus leaf in a dotted property path must be rejected
        fn = MagicMock()
        with pytest.raises(ValueError):
            self.figure.layout.on_change(fn, "xaxis.titlefont.bogus")

    # Python triggered changes
    # ------------------------
    def test_single_prop_callback_on_assignment(self):
        # Install callbacks on 'x', and 'y' property of first trace
        fn_x = MagicMock()
        fn_y = MagicMock()
        self.figure.data[0].on_change(fn_x, "x")
        self.figure.data[0].on_change(fn_y, "y")
        # Setting x and y on second trace does not trigger callback
        self.figure.data[1].x = [1, 2, 3]
        self.figure.data[1].y = [1, 2, 3]
        self.assertFalse(fn_x.called)
        self.assertFalse(fn_y.called)
        # Set x on first trace
        self.figure.data[0].x = [10, 20, 30]
        fn_x.assert_called_once_with(self.figure.data[0], (10, 20, 30))
        self.assertFalse(fn_y.called)
        # Set y on first trace
        self.figure.data[0].y = [11, 22, 33]
        fn_y.assert_called_once_with(self.figure.data[0], (11, 22, 33))

    def test_multi_prop_callback_on_assignment_trace(self):
        # Register callback if either 'x' or 'y' changes on first trace
        fn = MagicMock()
        self.figure.data[0].on_change(fn, "x", "y")
        # Perform assignment on one of the properties
        self.figure.data[0].x = [11, 22, 33]
        # Check function called once with new value of x and old value of y
        fn.assert_called_once_with(self.figure.data[0], (11, 22, 33), (3, 2, 1))

    def test_multi_prop_callback_on_assignment_layout(self):
        fn_range = MagicMock()
        # Register callback if either axis range is changed. Both tuple and
        # dot syntax are supported for nested properties
        self.figure.layout.on_change(fn_range, ("xaxis", "range"), "yaxis.range")
        self.figure.layout.xaxis.range = [-10, 10]
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), None)

    def test_multi_prop_callback_on_assignment_layout_nested(self):
        fn_titlefont = MagicMock()
        fn_xaxis = MagicMock()
        fn_layout = MagicMock()
        # Register callback on change to family property under titlefont
        self.figure.layout.xaxis.titlefont.on_change(fn_titlefont, "family")
        # Register callback on the range and titlefont.family properties
        # under xaxis
        self.figure.layout.xaxis.on_change(fn_xaxis, "range", "title.font.family")
        # Register callback on xaxis object itself
        self.figure.layout.on_change(fn_layout, "xaxis")
        # Assign a new xaxis range and titlefont.family
        self.figure.layout.xaxis.title.font.family = "courier"
        # Check that all callbacks were executed once
        fn_titlefont.assert_called_once_with(
            self.figure.layout.xaxis.title.font, "courier"
        )
        fn_xaxis.assert_called_once_with(self.figure.layout.xaxis, (-1, 4), "courier")
        fn_layout.assert_called_once_with(
            self.figure.layout,
            go.layout.XAxis(range=(-1, 4), title={"font": {"family": "courier"}}),
        )

    def test_prop_callback_nested_arrays(self):
        # Initialize updatemenus and buttons
        self.figure.layout.updatemenus = [{}, {}, {}]
        self.figure.layout.updatemenus[2].buttons = [{}, {}]
        self.figure.layout.updatemenus[2].buttons[1].label = "button 1"
        self.figure.layout.updatemenus[2].buttons[1].method = "relayout"
        # Register method callback
        fn_button = MagicMock()
        fn_layout = MagicMock()
        self.figure.layout.updatemenus[2].buttons[1].on_change(fn_button, "method")
        self.figure.layout.on_change(fn_layout, "updatemenus[2].buttons[1].method")
        # Update button method
        self.figure.layout.updatemenus[2].buttons[1].method = "restyle"
        # Check that both callbacks are called once
        fn_button.assert_called_once_with(
            self.figure.layout.updatemenus[2].buttons[1], "restyle"
        )
        fn_layout.assert_called_once_with(self.figure.layout, "restyle")

    def test_callback_on_update(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range")
        self.figure.update({"layout": {"yaxis": {"range": [11, 22]}}})
        fn_range.assert_called_once_with(self.figure.layout, (-1, 4), (11, 22))

    def test_callback_on_update_single_call(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
        self.figure.update(
            {"layout": {"xaxis": {"range": [-10, 10]}, "yaxis": {"range": [11, 22]}}}
        )
        # Even though both properties changed, callback should be called
        # only once with the new value of both properties
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)

    def test_callback_on_batch_update(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
        with self.figure.batch_update():
            self.figure.layout.xaxis.range = [-10, 10]
            self.figure.layout.width = 500
            # Check fn not called before context exits
            self.assertFalse(fn_range.called)
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), None, 500)

    def test_callback_on_batch_animate(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
        with self.figure.batch_animate():
            self.figure["layout.xaxis.range"] = [-10, 10]
            self.figure[("layout", "yaxis", "range")] = (11, 22)
            # Check fn not called before context exits
            self.assertFalse(fn_range.called)
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)

    def test_callback_on_plotly_relayout(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
        self.figure.plotly_relayout(
            relayout_data={"xaxis.range": [-10, 10], "yaxis.range": [11, 22]}
        )
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)

    def test_callback_on_plotly_restyle(self):
        # Register callback if either 'x' or 'y' changes on first trace
        fn = MagicMock()
        self.figure.data[0].on_change(fn, "x", "y")
        # Perform assignment on one of the properties
        self.figure.plotly_restyle(
            {"x": [[11, 22, 33], [1, 11, 111]]}, trace_indexes=[0, 1]
        )
        # Check function called once with new value of x and old value of y
        fn.assert_called_once_with(self.figure.data[0], (11, 22, 33), (3, 2, 1))

    def test_callback_on_plotly_update(self):
        fn_range = MagicMock()
        self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
        self.figure.plotly_update(
            restyle_data={"marker.color": "blue"},
            relayout_data={"xaxis.range": [-10, 10], "yaxis.range": [11, 22]},
        )
        fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)
| 37.672566 | 87 | 0.624501 |
86a56b73d47c23a31cf48c2b59563c094d1eb902 | 1,097 | py | Python | classes/marker_class.py | tum-phoenix/drive_ros_marker_detection | 63ca42b87499d530ab91a0ee812e55faa47ffb14 | [
"BSD-3-Clause"
] | null | null | null | classes/marker_class.py | tum-phoenix/drive_ros_marker_detection | 63ca42b87499d530ab91a0ee812e55faa47ffb14 | [
"BSD-3-Clause"
] | 1 | 2018-11-29T14:21:36.000Z | 2018-11-29T14:21:36.000Z | classes/marker_class.py | tum-phoenix/drive_ros_marker_detection | 63ca42b87499d530ab91a0ee812e55faa47ffb14 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# mapping: id (as int) to sign description (string)
import pickle
# all carolo cup signs currently in use
# Ids 0-17 are speed-limit signs (start and end variants); 18+ are road markings.
# NOTE(review): ids 20 and 25 both map to 'startline' -- confirm 25 is not
# meant to be a different marker.
marker_name_dict = {
    0: '10_speed_limit',
    1: '20_speed_limit',
    2: '30_speed_limit',
    3: '40_speed_limit',
    4: '50_speed_limit',
    5: '60_speed_limit',
    6: '70_speed_limit',
    7: '80_speed_limit',
    8: '90_speed_limit',
    9: 'end_10_speed_limit',
    10: 'end_20_speed_limit',
    11: 'end_30_speed_limit',
    12: 'end_40_speed_limit',
    13: 'end_50_speed_limit',
    14: 'end_60_speed_limit',
    15: 'end_70_speed_limit',
    16: 'end_80_speed_limit',
    17: 'end_90_speed_limit',
    18: 'right_arrow',
    19: 'left_arrow',
    20: 'startline',
    21: 'broken_crossing_line',
    22: 'continuous_crossing_line',
    23: 'left_crossing_turning_line',
    24: 'right_crossing_turning_line',
    25: 'startline',
    26: 'zebra_crossing'
}
# persist the mapping for consumers that load it at runtime
with open('marker_name_dict.pkl', 'wb') as f:
    pickle.dump(marker_name_dict, f)
| 28.868421 | 51 | 0.58979 |
b25d3d4097ea668ca229cf25cfcdac1207e5eca9 | 1,912 | py | Python | src/polydev/github/zest_hooks.py | Polyconseil/polydev-github | db2c49a8347bc7e3049fe234086676179dfc55b4 | [
"BSD-3-Clause"
] | null | null | null | src/polydev/github/zest_hooks.py | Polyconseil/polydev-github | db2c49a8347bc7e3049fe234086676179dfc55b4 | [
"BSD-3-Clause"
] | null | null | null | src/polydev/github/zest_hooks.py | Polyconseil/polydev-github | db2c49a8347bc7e3049fe234086676179dfc55b4 | [
"BSD-3-Clause"
] | null | null | null | import re
import subprocess
import github_release
def format_changelog(data):
    """Build a markdown '## Changelog' section from zest.releaser's data.

    Takes the last-release history fragment, drops its two heading lines,
    and keeps everything up to the insertion marker line.
    """
    history = data['history_last_release'].splitlines()
    cutoff = data['history_insert_line_here']
    body = history[2:cutoff]
    return '\n'.join(['## Changelog', *body])
def get_github_repositories():
    """Return the set of GitHub 'owner/repo' slugs among this repo's git remotes."""
    output = subprocess.check_output(['git', 'remote', '-v']).decode()  # noqa:S603,S607
    pattern = r'git@github\.com:([\w-]+/[\w-]+)(?:\.git)?'
    return set(re.findall(pattern, output))
def ask_direct_question(question, default=None):
    """Prompt for a yes/no answer on stdin and return it as a bool.

    ``default`` (True/False/None) is what an empty answer means and picks
    the '(Y/n)?' / '(y/N)?' / '(y/n)?' suffix; the prompt repeats until the
    reply resolves to a boolean.
    """
    suffix = {True: '(Y/n)?', False: '(y/N)?', None: '(y/n)?'}[default]
    prompt = '{} {} '.format(question.rstrip('?'), suffix)
    answers = {'y': True, 'yes': True, 'n': False, 'no': False, '': default}
    while True:
        reply = answers.get(input(prompt).lower())
        if isinstance(reply, bool):
            return reply
def publish_release_on_github(data):
    """zest.releaser hook: offer to publish the fresh release on GitHub.

    For every GitHub remote of the current repository, ask the user
    (defaulting to yes) whether to create a GitHub release tagged with the
    version from the first history heading, using the formatted changelog
    as the release body.  Fix: user-facing message typo 'manualy'.
    """
    release_tag = data['headings'][0]['version']  # Should be data['version']...
    changelog = format_changelog(data)
    for repository in get_github_repositories():
        response = ask_direct_question(
            "\n\n{changelog}\n\nPublish release {release} to Github ({repo})?".format(
                changelog=changelog,
                release=release_tag,
                repo=repository,
            ),
            default=True,
        )
        if response:
            try:
                github_release.gh_release_create(
                    repository,
                    release_tag,
                    body=changelog,
                    publish=True,
                )
            except EnvironmentError as e:
                # best-effort: report the failure and let the user act
                print(e)
                print('=> You should create the release manually.')
| 28.969697 | 89 | 0.562762 |
d631725e320080cb2cfa8d4df3b68390768ef92f | 2,044 | py | Python | python/001_Two_Sum.py | xiyuansun/leetcode | a65a536017f9583b2479c90634bf140a36f70727 | [
"MIT"
] | null | null | null | python/001_Two_Sum.py | xiyuansun/leetcode | a65a536017f9583b2479c90634bf140a36f70727 | [
"MIT"
] | null | null | null | python/001_Two_Sum.py | xiyuansun/leetcode | a65a536017f9583b2479c90634bf140a36f70727 | [
"MIT"
] | null | null | null | class Solution(object):
# def twoSum(self, nums, target):
# """
# :type nums: List[int]
# :type target: int
# :rtype: List[int]
# """
# #n^2
# ls = len(nums)
# for i in range(ls):
# for j in range(i + 1, ls):
# if nums[i] + nums[j] == target:
# return [i, j]
# def twoSum(self, nums, target):
# # hash 1
# hash_nums = {}
# for index, num in enumerate(nums):
# try:
# hash_nums[num].append(index)
# except KeyError:
# hash_nums[num] = [index]
# for index, num in enumerate(nums):
# another = target - num
# try:
# candicate = hash_nums[another]
# if another == num:
# if len(candicate) > 1:
# return candicate
# else:
# continue
# else:
# return [index, candicate[0]]
# except KeyError:
# pass
# def twoSum(self, nums, target):
# # hash 2
# hash_nums = {}
# for index, num in enumerate(nums):
# another = target - num
# try:
# hash_nums[another]
# return [hash_nums[another], index]
# except KeyError:
# hash_nums[num] = index
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
num_index = [(value, index) for (index, value) in enumerate(nums)]
num_index.sort()
begin = 0
end = len(nums) - 1
while begin < end:
curr = num_index[begin][0] + num_index[end][0]
if curr == target:
return [num_index[begin][1], num_index[end][1]]
elif curr < target:
begin = begin + 1
else:
end = end - 1
| 29.2 | 74 | 0.419276 |
c25e6cede5d999193f57d9ad15a1cdad066042c1 | 2,942 | py | Python | leetcode/network_delay_time.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | 3 | 2021-05-21T12:55:14.000Z | 2022-02-01T16:21:30.000Z | leetcode/network_delay_time.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | null | null | null | leetcode/network_delay_time.py | zhangao0086/Python-Algorithm | 981c875b2e0f30619bd3d44e1f2bd0c47d1464a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
import collections, heapq
from typing import List
class Solution:
    """LeetCode 743 (Network Delay Time), solved with Dijkstra's algorithm."""

    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """Return the time for a signal from node K to reach all N nodes, or -1.

        ``times`` is a list of directed weighted edges ``[u, v, w]``; nodes
        are labelled 1..N.  A lazy-deletion min-heap settles each node at its
        shortest distance from K.
        """
        adjacency = collections.defaultdict(list)
        for src, dst, weight in times:
            adjacency[src].append((dst, weight))
        settled = {}          # node -> shortest time from K
        frontier = [(0, K)]   # (elapsed time, node) min-heap
        while frontier:
            elapsed, node = heapq.heappop(frontier)
            if node in settled:
                continue  # stale heap entry; node already settled earlier
            settled[node] = elapsed
            for neighbour, weight in adjacency[node]:
                heapq.heappush(frontier, (elapsed + weight, neighbour))
        latest = max(settled.values())
        return latest if len(settled) == N else -1
# class Solution:
# def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
# dt, graph, seen = {}, collections.defaultdict(list), {}
# for u, v, w in times:
# graph[u].append((v, w))
# dt[u], dt[v] = float('inf'), float('inf')
# seen[u], seen[v] = False, False
# if len(dt) < N: return -1
# dt[K] = 0
# while True:
# vertex, min_distance = 0, float('inf')
# for num in graph:
# if not seen[num] and dt[num] < min_distance:
# min_distance, vertex = dt[num], num
# if vertex == 0: break
# seen[vertex] = True
# for v, w in graph[vertex]:
# dt[v] = min(dt[vertex] + w, dt[v])
# time = max(dt.values())
# return time if time != float('inf') else -1
# Queue
# class Solution:
# def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
# dt, graph, queue = {}, collections.defaultdict(list), [(0, K)]
# for u, v, w in times:
# graph[u].append((v, w))
# dt[u], dt[v] = float('inf'), float('inf')
# if len(dt) < N: return -1
# while queue:
# time, vertex = queue.pop(0)
# if time < dt[vertex]:
# dt[vertex] = time
# for v, w in graph[vertex]:
# queue.append((time + w, v))
# time = max(dt.values())
# return time if time != float('inf') else -1
# Ad-hoc manual checks; run this module directly to print the results.
if __name__ == '__main__':
    # 2 nodes, source 2: reaches node 1 via the weight-3 edge -> prints 3
    print(Solution().networkDelayTime([[1,2,1],[2,1,3]], 2, 2))
    # print(Solution().networkDelayTime([[2,1,1],[2,3,1],[3,4,1]], 4, 2))
    # print(Solution().networkDelayTime([[1,2,1],[2,3,1],[3,4,1]], 4, 2))
    # print(Solution().networkDelayTime([[1,2,1],[2,3,7],[1,3,4],[2,1,2]], 4, 1))
    # larger dense 5-node case, source node 3
    print(Solution().networkDelayTime([
        [4,2,76],
        [1,3,79],
        [3,1,81],
        [4,3,30],
        [2,1,47],
        [1,5,61],
        [1,4,99],
        [3,4,68],
        [3,5,46],
        [4,1,6],
        [5,4,7],
        [5,3,44],
        [4,5,19],
        [2,3,13],
        [3,2,18],
        [1,2,0],
        [5,1,25],
        [2,5,58],
        [2,4,77],
        [5,2,74]], 5, 3))
| 31.978261 | 81 | 0.459551 |
7009dc662e1860f414e7a311daabca07e82e945f | 2,885 | py | Python | SplitPDF.py | sambbhavgarg/SplitPDF | bf850d322db815491491e6f784fc588f6925aca2 | [
"MIT"
] | null | null | null | SplitPDF.py | sambbhavgarg/SplitPDF | bf850d322db815491491e6f784fc588f6925aca2 | [
"MIT"
] | null | null | null | SplitPDF.py | sambbhavgarg/SplitPDF | bf850d322db815491491e6f784fc588f6925aca2 | [
"MIT"
] | null | null | null | from PyPDF2 import PdfFileWriter, PdfFileReader
import pandas as pd
import os
class SplitPDF:
    """Split a mail-merge certificate PDF into one personalised PDF per row.

    The Excel sheet provides one row per recipient; the mail-merge PDF from
    MS Word contains one page per recipient in the same order.  Each page is
    written under a unique name derived from the first column plus suffixes.
    """

    def __init__(self, config):
        """Load the Excel sheet and derive all paths from ``config``.

        :param config: dict with keys ``data_dir``, ``excel_data_file_name``,
            ``program_name_dir``, ``all_certs_mail_merge_pdf``,
            ``out_dir_name_for_split_pdfs``, ``sheet_num``,
            ``individual_file_suffix_1``, ``individual_file_suffix_2``,
            ``run``, ``create_sheet_with_attachments_column``.
        """
        # keep the config: splitPDF() needs it too (it previously relied on a
        # module-level ``config`` global, breaking standalone use)
        self.config = config
        # path to excel data file
        self.excel_file_path = os.path.join(config['data_dir'], config['excel_data_file_name'])
        # path to the single PDF created by Mail Merge in MS Word
        self.all_certs_file_path = os.path.join(config['program_name_dir'], config['all_certs_mail_merge_pdf'])
        print(self.all_certs_file_path)
        # directory where all individual certificates will be stored
        self.out_path = os.path.join(config['program_name_dir'], config['out_dir_name_for_split_pdfs'])
        # load the excel data file into a pandas DataFrame
        self.df = pd.read_excel(self.excel_file_path, sheet_name=config['sheet_num'])
        # suffix the recipient name (first column) to create unique attachment
        # file names and avoid clashes when sending via Gmass
        self.df['attachment'] = (
            self.df.iloc[:, 0].str.strip()
            + f"{config['individual_file_suffix_1']}{config['individual_file_suffix_2']}"
        )
        print("Printing Results...")
        # ``display`` exists only inside IPython/Jupyter and raised NameError
        # when run as a plain script; ``print`` works everywhere
        print(self.df)
        self.run = config['run']
        self.create_sheet_with_attachments_column = config['create_sheet_with_attachments_column']

    def splitPDF(self):
        """Write one single-page PDF per recipient; optionally re-export the
        sheet including the new ``attachment`` column."""
        if self.run:
            # the last column is the 'attachment' column with the unique names
            iter_names = iter(self.df.iloc[:, -1].values.tolist())
            # close the source PDF when done (the handle used to leak)
            with open(self.all_certs_file_path, "rb") as source_stream:
                inputpdf = PdfFileReader(source_stream)
                # create the output directory if it doesn't exist
                os.makedirs(self.out_path, exist_ok=True)
                # one page from the merged PDF -> one personalised file
                for i in range(inputpdf.numPages):
                    output = PdfFileWriter()
                    output.addPage(inputpdf.getPage(i))
                    output_file = os.path.join(self.out_path, '{}.pdf'.format(next(iter_names)))
                    with open(output_file, 'wb') as outputStream:
                        output.write(outputStream)
        if self.create_sheet_with_attachments_column:
            # use the stored config instead of the old module-level global
            self.df.to_excel(
                os.path.join(self.config['program_name_dir'], 'data-with-attachment.xlsx'),
                index=False,
            )
| 47.295082 | 140 | 0.611438 |
b8a7b35603cb78071bdbe4ebe09c1c213091c4da | 1,790 | py | Python | scripts/artifacts/healthDistance.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | 1 | 2020-10-06T20:28:03.000Z | 2020-10-06T20:28:03.000Z | scripts/artifacts/healthDistance.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | null | null | null | scripts/artifacts/healthDistance.py | mastenp/iLEAPP | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | [
"MIT"
] | null | null | null | import glob
import os
import pathlib
import plistlib
import sqlite3
import json
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_healthDistance(files_found, report_folder, seeker):
    """iLEAPP artifact: extract distance samples from the Health sqlite db
    and emit an HTML report, a TSV export and a timeline entry.

    :param files_found: matched database paths; only the first is used
    :param report_folder: directory the report artefacts are written to
    :param seeker: framework-provided file seeker (unused here)
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    cursor = db.cursor()
    # 978307200 shifts Apple's 2001-01-01 epoch to the unix epoch before
    # DATETIME(..., 'UNIXEPOCH') formats it.  DATA_TYPE = 8 is presumably the
    # distance sample type -- confirm against the Health schema.
    # NOTE(review): the connection is never closed; consider db.close().
    cursor.execute(
    """
    SELECT
        DATETIME(SAMPLES.START_DATE + 978307200, 'UNIXEPOCH') AS "START DATE",
        DATETIME(SAMPLES.END_DATE + 978307200, 'UNIXEPOCH') AS "END DATE",
        QUANTITY AS "DISTANCE IN METERS",
        QUANTITY*3.28084 AS "DISTANCE IN FEET",
        (SAMPLES.END_DATE-SAMPLES.START_DATE) AS "TIME IN SECONDS",
        SAMPLES.DATA_ID AS "SAMPLES TABLE ID"
    FROM
        SAMPLES
    LEFT OUTER JOIN
        QUANTITY_SAMPLES
    ON SAMPLES.DATA_ID = QUANTITY_SAMPLES.DATA_ID
    LEFT OUTER JOIN
       CORRELATIONS
    ON SAMPLES.DATA_ID = CORRELATIONS.OBJECT
    WHERE
        SAMPLES.DATA_TYPE = 8
    """
    )
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    data_list = []
    if usageentries == 0:
        logfunc('No data available in table')
    else:
        # reports are only produced when at least one row was found
        for row in all_rows:
            data_list.append((row[0], row[1], row[2], row[3], row[4], row[5] ))
        description = ''
        report = ArtifactHtmlReport('Health Distance')
        report.start_artifact_report(report_folder, 'Distance', description)
        report.add_script()
        data_headers = ('Start Date','End Date','Distance in Meters','Distance in Feet','Time in Seconds','Samples Table ID' )
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = 'Health Distance'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = 'Health Distance'
        timeline(report_folder, tlactivity, data_list, data_headers)
| 29.833333 | 125 | 0.730168 |
617b1aed8f9ede10a27d4f14e2db17ce46e92bf7 | 4,623 | py | Python | Augmentor/Augmentor_phase1.py | AsherYartsevTech/Mask_RCNN | 7e267c73b6eecc5b3c13392c434dbfca1a68ac34 | [
"MIT"
] | 1 | 2019-01-07T17:48:13.000Z | 2019-01-07T17:48:13.000Z | Augmentor/Augmentor_phase1.py | AsherYartsevTech/Mask_RCNN | 7e267c73b6eecc5b3c13392c434dbfca1a68ac34 | [
"MIT"
] | 24 | 2018-12-05T09:51:06.000Z | 2019-03-10T18:49:33.000Z | Augmentor/Augmentor_phase1.py | AsherYartsevTech/Mask_RCNN | 7e267c73b6eecc5b3c13392c434dbfca1a68ac34 | [
"MIT"
] | null | null | null | import numpy
from PIL import Image, ImageDraw
from pycocotools.coco import COCO
import os.path
import json
# dataDir = '/Users/orshemesh/Desktop/Project/augmented_leaves/origin/'
# annFile = dataDir + 'leaves.json'
# output_dir = dataDir + 'output_phase1/'
def create_categories_map(annFile):
    """Map each COCO category id to its record plus a dedicated colour band.

    The 0-255 grey range is split evenly across the categories so every
    category draws mask pixels from its own, non-overlapping slice.

    :param annFile: path to a COCO-style json annotation file
    :return: dict of category id -> {'orig_json': <category>, 'colors': [ints]}
    """
    with open(annFile) as handle:
        annotations = json.load(handle)
    categories = annotations['categories']
    palette = numpy.array_split(numpy.arange(256), len(categories))
    return {
        category['id']: {'orig_json': category, 'colors': list(palette[index])}
        for index, category in enumerate(categories)
    }
# create a folder with all the masks as multicolor pngs
def augment_create_mask_files(dataset_dir_path, dataset_annotation_file_path, output_dir):
    """For every image in a COCO dataset, render its annotation polygons into a
    PNG where each annotation gets a unique RGBA colour drawn from its
    category's colour band, and save it to ``output_dir``.

    :param dataset_dir_path: directory containing the source images
    :param dataset_annotation_file_path: COCO json annotation file
    :param output_dir: directory where the mask PNGs are written
    :return: the category map produced by ``create_categories_map``
    """
    dataDir = dataset_dir_path  # source image directory
    annFile = dataset_annotation_file_path  # COCO annotation json
    output_dir = output_dir  # destination for the generated mask PNGs
    categories_map = create_categories_map(annFile)
    coco = COCO(annFile)
    categories = coco.loadCats(coco.getCatIds())
    categories_names = [category['name'] for category in categories]
    print('COCO categories: \n{}\n'.format(' '.join(categories_names)))
    cucumbers_Ids = coco.getCatIds(catNms=categories_names)
    images_Ids = coco.getImgIds()
    imgs = coco.loadImgs(images_Ids)
    image_num = 0
    for img in imgs:
        annotation_Ids = coco.getAnnIds(imgIds=img['id'], catIds=cucumbers_Ids, iscrowd=None)
        annotations = coco.loadAnns(annotation_Ids)
        # read image as RGB and add alpha (transparency)
        images_dir_path = dataDir
        image_name = img['file_name']
        im = Image.open(os.path.join(images_dir_path, image_name)).convert("RGBA")
        # im.show()
        # convert to numpy (for convenience)
        imArray = numpy.asarray(im)
        # assemble new image (uint8: 0-255)
        shape = imArray.shape  # NOTE(review): unused
        newImArray = numpy.zeros(imArray.shape, dtype='uint8')
        # newImArray = numpy.empty((dy, dx, 4), dtype='uint8')
        # copy the RGB channels; alpha stays 0 until a mask pixel is written
        newImArray[:, :,0:3] = imArray[:, :, 0:3]
        # for x in range(shape[0]):
        #     for y in range(shape[1]):
        #         newImArray[x][y][3] = 0
        # newImArray[:, :, :3] = imArray[y1:y2, x1:x2, :3]
        mask_colors = []
        for annotation in annotations:
            # pick 3 random values from this category's colour band, alpha=255
            color = numpy.random.choice(categories_map[annotation['category_id']]['colors'], size=3)
            color = numpy.append(color, [255])
            # re-draw while the colour collides with one already used in this image
            while len([c for c in mask_colors if c[0]==color[0] and c[1]==color[1] and c[2]==color[2]]) != 0:
                color = numpy.random.choice(categories_map[annotation['category_id']]['colors'], size=3)
                color = numpy.append(color, [255])
            # remember the colour so later annotations avoid it
            mask_colors.append(color)
            for segmentation in annotation['segmentation']:
                # COCO segmentation is a flat [x0, y0, x1, y1, ...] list
                polygon = []
                for j in range(len(segmentation) // 2):
                    polygon.append((segmentation[2*j], segmentation[2*j+1]))
                # rasterise the polygon into a binary mask
                maskIm = Image.new('L', (imArray.shape[1], imArray.shape[0]), 0)
                ImageDraw.Draw(maskIm).polygon(polygon, fill=1, outline=0)
                mask = numpy.array(maskIm)
                # transparency (4th column)
                # newImArray[:, :, 3] = mask * 255
                # numpy.arange()
                # for (x, y), _ in numpy.ndenumerate(mask):
                #     if mask[x][y] != 0:
                #         new_mask[x][y] = color
                # paint every masked pixel with this annotation's RGBA colour
                rows = numpy.where(mask[:,:] != 0)
                for x, y in zip(rows[0], rows[1]):
                    newImArray[x, y] = color
        try:
            newIm = Image.fromarray(newImArray, "RGBA")
            # newIm.show()
            newIm.save(os.path.join(output_dir, image_name).split('.')[0]+".png")
            image_num = image_num + 1
            print('{} out of {}\n path:{}'.format(image_num, len(imgs), os.path.join(output_dir, image_name).split('.')[0]+".png"))
        except Exception as e:
            # best-effort: report and continue with the next image
            print(e)
            print("Unexpected error: image {}".format(image_name.split('.')[0]+".png"))
    return categories_map
| 38.848739 | 131 | 0.600476 |
e6133811f0f8edb0d7ae8aae3b0a15a4447e5d74 | 6,141 | py | Python | src/cogent3/align/dp_calculation.py | jamesmartini/cogent3 | 5d0aab1871561aa3d4cd6b629be6cc7a23f15c49 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/align/dp_calculation.py | jamesmartini/cogent3 | 5d0aab1871561aa3d4cd6b629be6cc7a23f15c49 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/align/dp_calculation.py | jamesmartini/cogent3 | 5d0aab1871561aa3d4cd6b629be6cc7a23f15c49 | [
"BSD-3-Clause"
] | null | null | null | from cogent3.align import indel_model, pairwise
from cogent3.maths.markov import SiteClassTransitionMatrix
from cogent3.recalculation.definition import (
CalcDefn,
CalculationDefn,
NonParamDefn,
PartitionDefn,
ProbabilityParamDefn,
)
# Module metadata.  Fix: __maintainer__ misspelled "Huttleuy", inconsistent
# with __author__ and __credits__ ("Huttley").
__author__ = "Gavin Huttley and Peter Maxwell"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2021.5.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
class IndelParameterDefn(ProbabilityParamDefn):
    """A probability-valued optimisable parameter for the indel model."""

    # locus means allowed to vary by loci
    valid_dimensions = ("edge", "bin", "locus")
    independent_by_default = False
    # starting value used by the optimiser
    default_value = default = 0.4
    # lower bound keeps the parameter strictly positive
    lower = 0.0001
def make_indel_model_defn(with_indel_params=True, kn=True):
    """Return the calculation definition for the indel model.

    :param with_indel_params: when True, build the model from two optimisable
        parameters (indel rate and length); otherwise return a non-optimisable
        placeholder supplied externally.
    :param kn: choose the Knudsen-Miyamoto indel model over the simple one.
    """
    if kn:
        klass = indel_model.KnudsenMiyamotoIndelModel
    else:
        klass = indel_model.SimpleIndelModel
    if with_indel_params:
        a = IndelParameterDefn("indel_length")  # P(extend indel)
        r = IndelParameterDefn("indel_rate")  # indels per substitution
        return CalcDefn(klass, name="indels")(r, a)
    else:
        # not optimisable parameter, a constant. Another example is the
        # alignment in an LikFunc
        return NonParamDefn("indel_model")
class FloatWithAttrs(float):
    """A float subclass that can carry arbitrary extra attributes.

    Used to attach metadata (e.g. an ``edge``) to a numeric score while
    keeping normal float arithmetic and comparisons intact.
    """

    def __new__(cls, value, **kw):
        return float.__new__(cls, value)

    def __init__(self, value, **kw):
        float.__init__(self)
        for name, attr_value in kw.items():
            setattr(self, name, attr_value)
def Edge(seq1, seq2, length, bin_data, switch=1.0, bprobs=None):
    """Build a pairwise HMM for one sequence pair (potentially within a tree).

    Emission probabilities come from each bin's motif probs and Qd; the
    transition matrix is either the single bin's indel transition matrix or,
    for multiple bins, the bin matrices nested inside a site-class switch
    matrix (``bprobs`` required in that case).
    """
    bins = len(bin_data)
    pair = pairwise.Pair(seq1, seq2)
    EP = pair.make_reversible_emission_probs(
        [(bin.mprobs, bin.Qd) for bin in bin_data], length
    )
    tms = [bin.indel.calc_transition_matrix(length) for bin in bin_data]
    if bins == 1:
        TM = tms[0]
    else:
        assert bprobs
        R = SiteClassTransitionMatrix(switch, bprobs)
        TM = R.nestTransitionMatricies(tms)
    # probabilities must never be negative; bin_data shown on failure
    assert min(TM.Matrix.flat) >= 0, bin_data
    return EP.make_pair_HMM(TM)
class BinData(object):
    """Bundle of per-bin quantities: motif probs, indel model, Qd and rate."""

    def __init__(self, mprobs, indel, Qd, rate=1.0):
        # plain data holder; attributes are read directly by Edge()
        self.mprobs = mprobs
        self.indel = indel
        self.Qd = Qd
        self.rate = rate

    def __repr__(self):
        # summarise: rate plus the indel model's attribute dict
        return "Bin(Pi, Qd, %s, %s)" % (self.rate, vars(self.indel))
class AnnotateFloatDefn(CalculationDefn):
    """Wrap a score in FloatWithAttrs so the producing edge travels with it."""

    name = "annot"

    def calc(self, value, edge):
        return FloatWithAttrs(value, edge=edge)
class ViterbiPogDefn(CalculationDefn):
    """Extract the alignment from an edge via its ``getaln`` callable."""

    name = "align"

    def calc(self, edge):
        return edge.getaln()
class FwdDefn(CalculationDefn):
    """Compute an edge's forward score (without the cost function)."""

    name = "fwd"

    def calc(self, edge):
        return edge.get_forward_score(use_cost_function=False)
class _GetAlign:
def __init__(self, edge, length1, length2):
try:
ratio = length1 / (length1 + length2)
except (ZeroDivisionError, FloatingPointError):
ratio = 1.0
self.edge = edge
self.ratio = ratio
def __call__(self):
return self.edge.get_viterbi_path().get_alignable(self.ratio)
class EdgeSumAndAlignDefn(CalculationDefn):
    """Single-bin pairwise edge whose ``getaln`` yields a viterbi alignment."""

    name = "pair"

    # NOTE(review): parameter ``bin`` shadows the builtin of the same name.
    def calc(self, pog1, pog2, length1, length2, bin):
        edge = Edge(pog1, pog2, length1 + length2, [bin])
        edge.getaln = _GetAlign(edge, length1, length2)
        return edge
class EdgeSumAndAlignDefnWithBins(CalculationDefn):
    """Multi-bin pairwise edge; ``getaln`` is a closure over the lengths."""

    name = "pair"

    def calc(self, pog1, pog2, length1, length2, switch, bprobs, *bin_data):
        edge = Edge(pog1, pog2, length1 + length2, bin_data, switch, bprobs)
        def _getaln():
            # split the combined branch length proportionally between the two
            ratio = length1 / (length1 + length2)
            (vtScore, result) = edge.getViterbiScoreAndAlignable(ratio)
            return result
        edge.getaln = _getaln
        return edge
def _recursive_defns(edge, subst, leaf, edge_defn_constructor, bin_args):
    """Recursively build a pairwise-HMM defn for a (strictly bifurcating) tree.

    Returns ``(edge_defn, scores)`` where ``edge_defn`` calculates a fwd score
    with an ``.edge`` attribute which can provide a viterbi alignment which
    can be provided to a similar defn for the parent.  Tips contribute leaf
    defns; internal children contribute their own recursive defn wrapped in a
    ViterbiPogDefn so the parent consumes the child's alignment.
    """
    scores = []
    args = []
    for child in edge.children:
        if child.istip():
            args.append(leaf.select_from_dimension("edge", child.name))
        else:
            (child_defn, scores2) = _recursive_defns(
                child, subst, leaf, edge_defn_constructor, bin_args
            )
            child_defn = ViterbiPogDefn(child_defn)
            scores.extend(scores2)
            args.append(child_defn)
    child_names = [child.name for child in edge.children]
    # pairwise HMM requires exactly two children per internal node
    assert len(child_names) == 2, child_names
    child_lengths = subst["length"].across_dimension("edge", child_names)
    args.extend(child_lengths)
    args.extend(bin_args)
    edge_defn = edge_defn_constructor(*args)
    # fwd = FwdDefn(edge_defn)
    # scores.append(fwd)
    return (edge_defn, scores)
def make_forward_tree_defn(
    subst_model, tree, bin_names, with_indel_params=True, kn=True
):
    """Build the pairwise-forward likelihood defn for a whole tree.

    Chooses the single-bin or multi-bin edge constructor depending on how
    many bins were requested, wires up indel/substitution sub-defns, and
    returns the top-level score annotated with the root edge.
    """
    indel = make_indel_model_defn(with_indel_params, kn)
    subst = subst_model.make_fundamental_param_controller_defns(bin_names)
    leaf = NonParamDefn("leaf", dimensions=("edge",))
    if len(bin_names) > 1:
        # Multiple bins: need a switch rate and per-bin probabilities.
        switch = ProbabilityParamDefn("bin_switch", dimensions=["locus"])
        bprobs = PartitionDefn(
            [1.0 / len(bin_names) for bin in bin_names],
            name="bprobs",
            dimensions=["locus"],
            dimension=("bin", bin_names),
        )
        edge_args = [switch, bprobs]
        edge_defn_constructor = EdgeSumAndAlignDefnWithBins
    else:
        edge_args = []
        edge_defn_constructor = EdgeSumAndAlignDefn
    mprobs = subst["word_probs"]
    bin_data = CalcDefn(BinData)(mprobs, indel, subst["Qd"])
    bin_data = bin_data.across_dimension("bin", bin_names)
    edge_args.extend(bin_data)
    (top, scores) = _recursive_defns(
        tree, subst, leaf, edge_defn_constructor, edge_args
    )
    defn = FwdDefn(top)
    # defn = SumDefn(*scores)
    return AnnotateFloatDefn(defn, top)
| 30.40099 | 76 | 0.657222 |
594189ee7806cb600de0ca3f91a067be6b83729d | 1,257 | py | Python | invenio_app_ils/records/loaders/schemas/changed_by.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 41 | 2018-09-04T13:00:46.000Z | 2022-03-24T20:45:56.000Z | invenio_app_ils/records/loaders/schemas/changed_by.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 720 | 2017-03-10T08:02:41.000Z | 2022-01-14T15:36:37.000Z | invenio_app_ils/records/loaders/schemas/changed_by.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 54 | 2017-03-09T16:05:29.000Z | 2022-03-17T08:34:51.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""ChangedBy schema for marshmallow loader."""
from flask import has_request_context
from flask_login import current_user
from marshmallow import EXCLUDE, Schema, fields, validate
from invenio_app_ils.records.api import IlsRecord
class ChangedBySchema(Schema):
    """ChangedBy schema."""
    class Meta:
        """Meta attributes for the schema."""
        # Silently drop unexpected payload keys instead of raising.
        unknown = EXCLUDE
    # Kind of actor; restricted to IlsRecord.CURATOR_TYPES (values defined
    # in records.api, not visible here).
    type = fields.Str(
        required=True, validate=validate.OneOf(IlsRecord.CURATOR_TYPES)
    )
    # Actor identifier; presumably interpreted according to `type` -- confirm.
    value = fields.Str()
def set_changed_by(data, prev_record=None):
    """Automatically stamp the ``created_by``/``updated_by`` fields.

    Outside a request context ``data`` is returned untouched.  New records
    get ``created_by``; updates preserve the previous ``created_by`` (when
    present) and set ``updated_by`` to the current user.
    """
    if not has_request_context():
        return data
    actor = dict(type="user_id", value=str(current_user.id))
    if not prev_record:
        # creating a new record
        data["created_by"] = actor
        return data
    # updating an already existing record
    if "created_by" in prev_record:
        data["created_by"] = prev_record["created_by"]
    data["updated_by"] = actor
    return data
| 26.744681 | 76 | 0.683373 |
ec676d475342bac95532015233470777bd59873c | 398 | py | Python | 2019/04/program.py | Zargath/AdventOfCode | 863d2e05d3571d0a60f802f32a0faeeaef04e9af | [
"MIT"
] | null | null | null | 2019/04/program.py | Zargath/AdventOfCode | 863d2e05d3571d0a60f802f32a0faeeaef04e9af | [
"MIT"
] | null | null | null | 2019/04/program.py | Zargath/AdventOfCode | 863d2e05d3571d0a60f802f32a0faeeaef04e9af | [
"MIT"
] | null | null | null | limits = (147981, 691423)
# Advent of Code 2019 day 4: count numbers in the puzzle range whose digits
# never decrease left-to-right and which contain at least one pair of equal
# adjacent digits.
limits = (147981, 691423)
pwds = []
for candidate in range(*limits):
    digits = str(candidate)
    has_pair = False
    never_decreasing = True
    # Single-character string comparison orders digits exactly like ints.
    for left, right in zip(digits, digits[1:]):
        if left == right:
            has_pair = True
        if right < left:
            never_decreasing = False
    if has_pair and never_decreasing:
        pwds.append(candidate)
print(len(pwds))
ad07a8549a76d2dbff1249c28752ee09e73de7c2 | 3,626 | py | Python | commonlibs/math_tools/IOU.py | floatingstarZ/loc_cls_exp | 8b971db671753d3571914aaa760cc13ac47018e8 | [
"Apache-2.0"
] | null | null | null | commonlibs/math_tools/IOU.py | floatingstarZ/loc_cls_exp | 8b971db671753d3571914aaa760cc13ac47018e8 | [
"Apache-2.0"
] | null | null | null | commonlibs/math_tools/IOU.py | floatingstarZ/loc_cls_exp | 8b971db671753d3571914aaa760cc13ac47018e8 | [
"Apache-2.0"
] | null | null | null | import torch
def singleIOU(gt, bboxes):
    """Compute IoU between one ground-truth box and N candidate boxes.

    :param gt: tensor of shape (4,) as (left, top, right, bottom)
    :param bboxes: tensor of shape (N, 4), same layout
    :return: tensor of shape (N,); all zeros when ``gt`` has no area
    """
    zero = torch.Tensor([0.0])
    inter_left = torch.max(gt[0], bboxes[:, 0])
    inter_top = torch.max(gt[1], bboxes[:, 1])
    inter_right = torch.min(gt[2], bboxes[:, 2])
    inter_bottom = torch.min(gt[3], bboxes[:, 3])
    inter_w = torch.max(inter_right - inter_left, zero)
    inter_h = torch.max(inter_bottom - inter_top, zero)
    inter_area = inter_w * inter_h
    gt_area = torch.max((gt[2] - gt[0]) * (gt[3] - gt[1]), zero)
    if gt_area <= 0:
        # Degenerate ground truth: define IoU as zero for every candidate.
        return torch.zeros(bboxes.shape[0])
    box_areas = torch.max(
        (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1]), zero)
    return inter_area / (gt_area + box_areas - inter_area)
def IOU(gts, bboxes):
    """Pairwise IoU matrix between M ground-truth boxes and N boxes.

    :param gts: tensor of shape (M, 4)
    :param bboxes: tensor of shape (N, 4)
    :return: tensor of shape (M, N)
    """
    rows = [singleIOU(gt, bboxes).reshape(1, -1) for gt in gts]
    return torch.cat(rows, dim=0)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Calculate overlap between two sets of bboxes (inclusive-pixel, i.e.
    the "+1" width/height convention).

    If ``is_aligned`` is False the result is the full (m, n) matrix of
    overlaps; otherwise the i-th box of each set is paired with its
    counterpart and the two sets must have equal length.

    Args:
        bboxes1 (Tensor): shape (m, 4)
        bboxes2 (Tensor): shape (n, 4); m == n when ``is_aligned``
        mode (str): "iou" (intersection over union) or "iof"
            (intersection over the foreground box, i.e. bboxes1)
        is_aligned (bool): pair boxes index-by-index instead of all-pairs

    Returns:
        Tensor: shape (m, n), or (m,) values when ``is_aligned``
    """
    assert mode in ['iou', 'iof']
    rows, cols = bboxes1.size(0), bboxes2.size(0)
    if is_aligned:
        assert rows == cols
    if rows * cols == 0:
        # Preserve the original empty-result shapes.
        return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
    if is_aligned:
        lt = torch.max(bboxes1[:, :2], bboxes2[:, :2])  # [rows, 2]
        rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])  # [rows, 2]
    else:
        lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])  # [rows, cols, 2]
        rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])  # [rows, cols, 2]
    wh = (rb - lt + 1).clamp(min=0)
    overlap = wh[..., 0] * wh[..., 1]
    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
        bboxes1[:, 3] - bboxes1[:, 1] + 1)
    if not is_aligned:
        area1 = area1[:, None]  # broadcast over the columns
    if mode == 'iou':
        area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
            bboxes2[:, 3] - bboxes2[:, 1] + 1)
        return overlap / (area1 + area2 - overlap)
    return overlap / area1
if __name__ == '__main__':
    # Smoke-test fixtures: one matching box, one degenerate (zero-area) box,
    # and one point box; output is eyeballed, not asserted.
    gts = torch.Tensor([[3, 3, 6, 6],
                        [0, 0, 1, 1],
                        [1, 1, 1, 1]])
    bboxes = torch.Tensor([[3,3,6,6],
                           [4,4,7,7],
                           [2,2,5,5],
                           [3,2,6,5]])
    print(singleIOU(gts[0], bboxes))
    print(singleIOU(torch.Tensor([0, 0, 1, 1]), bboxes))
    # Zero-area ground truth: expects the all-zeros fallback.
    print(singleIOU(torch.Tensor([1, 1, 1, 1]), bboxes))
    print(IOU(gts, bboxes))
d35a681e3717e8bb5e70b8f8d6876277ca2015be | 678 | py | Python | codigo/Live184/ajuda.py | BrunoPontesLira/live-de-python | da6e463a89ed90d9efaa1c34088ab6460e949de1 | [
"MIT"
] | 2 | 2017-06-05T23:32:00.000Z | 2017-06-08T01:01:35.000Z | codigo/Live184/ajuda.py | BrunoPontesLira/live-de-python | da6e463a89ed90d9efaa1c34088ab6460e949de1 | [
"MIT"
] | null | null | null | codigo/Live184/ajuda.py | BrunoPontesLira/live-de-python | da6e463a89ed90d9efaa1c34088ab6460e949de1 | [
"MIT"
] | null | null | null | from rich.console import Console
# Render a colourised help screen from the Makefile's "##"-annotated targets.
console = Console()
console.print("\npra rodar comandos: [bold]make alvo[/bold]")
with open("./Makefile", "r") as makefile:
    for raw in makefile:
        if "##" not in raw:
            continue
        if "@" in raw:
            # "@" marks a section heading: show the text after the last "@".
            heading = raw.split("@")[-1].strip().capitalize()
            console.print(f"\n{heading}", style="bold green")
        else:
            # "target: deps  ## description" -> "target" + "Description".
            description = raw.rsplit("##", 1)[-1].strip().capitalize()
            target = raw.split("##", 1)[0].split(":")[0].strip()
            console.print(f" - [bold]{target}:[/bold] {description}")
ebe7308a7589fe600641317f73be652b6fa9d3db | 671 | py | Python | src/arg_pools/ssp_finetuning_imbalanced_cifar10_imb_0_01.py | zeyademam/active_learning | fc90eaed32ba5aeb88542fa4f6e8fa9d4fdd80ee | [
"MIT"
] | 41 | 2021-11-25T02:58:56.000Z | 2022-03-21T02:44:58.000Z | src/arg_pools/ssp_finetuning_imbalanced_cifar10_imb_0_01.py | zeyademam/active_learning | fc90eaed32ba5aeb88542fa4f6e8fa9d4fdd80ee | [
"MIT"
] | 1 | 2021-12-01T07:46:44.000Z | 2021-12-08T02:07:10.000Z | src/arg_pools/ssp_finetuning_imbalanced_cifar10_imb_0_01.py | zeyademam/active_learning | fc90eaed32ba5aeb88542fa4f6e8fa9d4fdd80ee | [
"MIT"
] | 2 | 2021-11-29T06:09:56.000Z | 2021-12-12T10:35:58.000Z | import os
dirname = os.path.dirname
args_pool = {
"imbalanced_cifar10": {
"eval_split": 0.1,
"loader_tr_args": {"batch_size": 128, "num_workers": 2},
"loader_te_args": {"batch_size": 100, "num_workers": 2},
"optimizer": "SGD",
"optimizer_args": {"lr": 0.002, "weight_decay": 0, "momentum": 0.9},
"lr_scheduler": "CosineAnnealingLR",
"lr_scheduler_args": {"T_max": 200},
"init_pretrained_ckpt_path": "../pretrained_ckpt/cifar10/simclr_imb_pretrain0_01.tar",
"required_key": ["encoder"],
"skip_key": ["linear"],
"replace_key": None,
"imbalanced_training": True,
},
}
| 31.952381 | 94 | 0.593145 |
28d72b343274d84329834121e804cf171e40bdcf | 1,761 | py | Python | app.py | Chenhuping/flask-restful-example | 8ddbca623e80684ea7fd9fce851b2c9251601dcc | [
"MIT"
] | 1 | 2019-06-27T01:03:38.000Z | 2019-06-27T01:03:38.000Z | app.py | Chenhuping/flask-restful-example | 8ddbca623e80684ea7fd9fce851b2c9251601dcc | [
"MIT"
] | null | null | null | app.py | Chenhuping/flask-restful-example | 8ddbca623e80684ea7fd9fce851b2c9251601dcc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import string
import flask_restful
from flask import Flask, abort, jsonify
from hashids import Hashids
from models import db
from common import Code, pretty_result
app = Flask(__name__)
# Hashids codec for obfuscating numeric ids (fixed salt, >= 8 lowercase
# alphanumeric characters).
hash_ids = Hashids(salt='hvwptlmj129d5quf', min_length=8, alphabet=string.ascii_lowercase + string.digits)
# Keep references to Flask's native exception handlers; create_app() restores
# them later (presumably because Flask-RESTful replaces them -- confirm).
handle_exception = app.handle_exception
handle_user_exception = app.handle_user_exception
def _custom_abort(http_status_code, **kwargs):
    """
    Customize the response payload format for abort(400).

    For status 400 the optional ``message`` kwarg is wrapped in the
    project's ``pretty_result`` envelope; any other status falls through
    to the default abort behaviour.
    """
    if http_status_code == 400:
        message = kwargs.get('message')
        if isinstance(message, dict):
            # Report only the first offending parameter as "param:info!".
            param, info = list(message.items())[0]
            data = '{}:{}!'.format(param, info)
            return abort(jsonify(pretty_result(Code.PARAM_ERROR, data=data)))
        else:
            return abort(jsonify(pretty_result(Code.PARAM_ERROR, data=message)))
    return abort(http_status_code)
def _access_control(response):
    """
    Attach permissive CORS headers to every response (cross-origin support).
    """
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET,HEAD,PUT,PATCH,POST,DELETE'
    response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
    # Preflight results may be cached for one day (86400 s).
    response.headers['Access-Control-Max-Age'] = 86400
    return response
def create_app(config):
    """
    Application factory: configure and return the module-level Flask app.
    """
    # Load configuration
    app.config.from_object(config)
    # Enable CORS on every response
    app.after_request(_access_control)
    # Use the custom abort(400) payload format
    flask_restful.abort = _custom_abort
    # Initialise the database
    db.init_app(app)
    # Register blueprints (imported here to avoid a circular import)
    from apis import api_v1
    app.register_blueprint(api_v1, url_prefix='/api/v1')
    # Restore Flask's native exception handlers
    app.handle_exception = handle_exception
    app.handle_user_exception = handle_user_exception
    return app
| 27.952381 | 106 | 0.69222 |
01602f6207598871f8f56ba7059ca42759f3a103 | 3,279 | py | Python | postProcessor.py | QuMuLab/temporal-nl-to-pddl | 61899bac8608baa719687adbbf0a9928fc06a106 | [
"MIT"
] | null | null | null | postProcessor.py | QuMuLab/temporal-nl-to-pddl | 61899bac8608baa719687adbbf0a9928fc06a106 | [
"MIT"
] | null | null | null | postProcessor.py | QuMuLab/temporal-nl-to-pddl | 61899bac8608baa719687adbbf0a9928fc06a106 | [
"MIT"
] | null | null | null | # This class provides methods to process the output of the LLM before it is returned to the user.
class PostProcessor:
    """Post-processes PDDL condition/effect strings produced by the LLM.

    All methods are stateless utilities, exposed as static methods so they
    work both as ``PostProcessor.m(...)`` and on an instance (the original
    definitions lacked ``@staticmethod`` and broke when called on an
    instance).
    """

    @staticmethod
    def listify(result):
        """Turn a PDDL condition/effect statement into a string that the
        ``eval`` function can convert to a Python list of clauses."""
        result = "['" + result + "']"
        result = result.replace("['(and ", "['")
        result = result.replace(")'] ", "']")
        result = result.replace(")) ", "))', '")
        result = result.replace(", ]", "]")
        return result

    @staticmethod
    def checkPreds(codeLs, inputText, preds):
        """Return indices of code segments containing a predicate
        (pred00, pred01, ...) that is absent from ``inputText``.

        Each offending index is reported at most once; the original could
        append duplicates when several predicates were missing, which made
        ``removeIrrelevantCode`` pop extra elements.  Only ``len(preds)``
        matters here -- it bounds the predicate numbering.
        """
        indices = []
        for count, code in enumerate(codeLs):
            for predCount in range(len(preds)):
                string = "pred0" + str(predCount)
                if string in code and string not in inputText:
                    indices.append(count)
                    break  # one report per segment is enough
        return indices

    @staticmethod
    def checkSyntax(codeLs, params, preds):
        """Return indices of code segments whose PDDL syntax is broken.

        Each segment is rewritten into an arithmetic expression (brackets
        and predicates become +1/-1 terms, parameters become 1) and
        evaluated: a SyntaxError/NameError or a zero result marks the
        segment as malformed.  A non-zero result other than 1 (unbalanced
        bracket count) is deliberately tolerated.
        """
        indices = []
        for count, code in enumerate(codeLs):
            code = '0' + code
            code = code.replace("(", "+1")
            code = code.replace(")", "-1")
            for predCount in range(len(preds)):
                code = code.replace("pred0" + str(predCount), "+1")
            code = code.replace("?", "/")
            for paramCount in range(len(params)):
                code = code.replace("param0" + str(paramCount), "1")
            # Strip temporal keywords so only the arithmetic skeleton remains.
            code = code.replace("at start", "")
            code = code.replace("at end", "")
            code = code.replace("over all", "")
            code = code.replace("not", "")
            try:
                # eval() on a sanitized arithmetic string built above; any
                # stray identifier raises and flags the segment instead.
                num = eval(code)
                correctSyntax = bool(int(num))
            except (SyntaxError, NameError):
                correctSyntax = False
            if not correctSyntax:
                indices.append(count)
        return indices

    @staticmethod
    def removeIrrelevantCode(codeLs, inputText, params, preds):
        """Filter ``codeLs`` in place: drop segments that reference unknown
        predicates, then drop segments with broken syntax.  Returns the
        (mutated) list."""
        for ind in sorted(
                PostProcessor.checkPreds(codeLs, inputText, preds),
                reverse=True):
            if ind < len(codeLs):
                codeLs.pop(ind)
        for ind in sorted(
                PostProcessor.checkSyntax(codeLs, params, preds),
                reverse=True):
            if ind < len(codeLs):
                codeLs.pop(ind)
        return codeLs
| 39.506024 | 100 | 0.534614 |
374b7123b4cf00fa24766050084ef175aeadf06c | 6,164 | py | Python | var/spack/repos/builtin/packages/py-pillow/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-10-04T20:05:45.000Z | 2021-10-04T20:05:45.000Z | var/spack/repos/builtin/packages/py-pillow/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2021-05-12T05:53:01.000Z | 2022-03-18T17:30:25.000Z | var/spack/repos/builtin/packages/py-pillow/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7 | 2018-09-13T18:04:56.000Z | 2020-03-18T20:52:06.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyPillowBase(PythonPackage):
    """Base class for Pillow and its fork Pillow-SIMD."""
    maintainers = ['adamjstewart']
    # Both packages satisfy the virtual "pil" dependency.
    provides('pil')
    # These defaults correspond to Pillow defaults
    # https://pillow.readthedocs.io/en/stable/installation.html#external-libraries
    variant('zlib', default=True, description='Compressed PNG functionality')
    variant('jpeg', default=True, description='JPEG functionality')
    variant('tiff', default=False, description='Compressed TIFF functionality')
    variant('freetype', default=False, description='Type related services')
    variant('lcms', default=False, description='Color management')
    variant('webp', default=False, description='WebP format')
    variant('webpmux', default=False, description='WebP metadata')
    variant('jpeg2000', default=False, description='JPEG 2000 functionality')
    variant('imagequant', default=False, description='Improved color quantization')
    variant('xcb', default=False, description='X11 screengrab support')
    # Required dependencies
    # https://pillow.readthedocs.io/en/latest/installation.html#notes
    depends_on('python@3.6:3.9', when='@8:', type=('build', 'run'))
    depends_on('python@3.5:3.8', when='@7.0:7.2', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.5:3.8', when='@6.2.1:6.2.2', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.5:3.7', when='@6.0:6.2.0', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.4:3.7', when='@5.2:5.4', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.4:3.6', when='@5.0:5.1', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.3:3.6', when='@4.0:4', type=('build', 'run'))
    depends_on('python@2.6:2.8,3.2:3.5', when='@2:3', type=('build', 'run'))
    depends_on('python@2.4:2.7', when='@:1', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # Optional dependencies
    depends_on('zlib', when='+zlib')
    depends_on('jpeg', when='+jpeg')
    depends_on('libtiff', when='+tiff')
    depends_on('freetype', when='+freetype')
    depends_on('lcms@2:', when='+lcms')
    depends_on('libwebp', when='+webp')
    depends_on('libwebp+libwebpmux+libwebpdemux', when='+webpmux')
    depends_on('openjpeg', when='+jpeg2000')
    depends_on('libimagequant', when='+imagequant')
    depends_on('libxcb', when='+xcb')
    conflicts('+webpmux', when='~webp', msg='Webpmux relies on WebP support')
    conflicts('+imagequant', when='@:3.2', msg='imagequant support was added in 3.3')
    conflicts('+xcb', when='@:7.0', msg='XCB support was added in 7.1')
    phases = ['build_ext', 'install']
    def patch(self):
        """Patch setup.py to provide library and include directories
        for dependencies."""
        library_dirs = []
        include_dirs = []
        # Collect lib/include dirs from every link-time dependency.
        for dep in self.spec.dependencies(deptype='link'):
            query = self.spec[dep.name]
            library_dirs.extend(query.libs.directories)
            include_dirs.extend(query.headers.directories)
        setup = FileFilter('setup.py')
        setup.filter('library_dirs = []',
                     'library_dirs = {0}'.format(library_dirs), string=True)
        setup.filter('include_dirs = []',
                     'include_dirs = {0}'.format(include_dirs), string=True)
        def variant_to_cfg(variant):
            # Produce an "enable-X=1" / "disable-X=1" build_ext option line.
            able = 'enable' if '+' + variant in self.spec else 'disable'
            return '{0}-{1}=1\n'.format(able, variant)
        with open('setup.cfg', 'a') as setup:
            setup.write('[build_ext]\n')
            variants = list(self.spec.variants)
            # Drop variants that this (older) Pillow version does not know.
            if self.spec.satisfies('@:7.0'):
                variants.remove('xcb')
            if self.spec.satisfies('@:3.2'):
                variants.remove('imagequant')
            for variant in variants:
                setup.write(variant_to_cfg(variant))
            setup.write('rpath={0}\n'.format(':'.join(self.rpath)))
            setup.write('[install]\n')
    def setup_build_environment(self, env):
        # Let Pillow's build parallelise up to Spack's make_jobs setting.
        env.set('MAX_CONCURRENCY', str(make_jobs))
    # Tests need to be re-added since `phases` was overridden
    run_after('install')(
        PythonPackage._run_default_install_time_test_callbacks)
    run_after('install')(PythonPackage.sanity_check_prefix)
class PyPillow(PyPillowBase):
    """Pillow is a fork of the Python Imaging Library (PIL). It adds image
    processing capabilities to your Python interpreter. This library supports
    many file formats, and provides powerful image processing and graphics
    capabilities."""
    homepage = "https://python-pillow.org/"
    pypi = "Pillow/Pillow-7.2.0.tar.gz"
    version('8.0.0', sha256='59304c67d12394815331eda95ec892bf54ad95e0aa7bc1ccd8e0a4a5a25d4bf3')
    version('7.2.0', sha256='97f9e7953a77d5a70f49b9a48da7776dc51e9b738151b22dacf101641594a626')
    version('7.0.0', sha256='4d9ed9a64095e031435af120d3c910148067087541131e82b3e8db302f4c8946')
    version('6.2.2', sha256='db9ff0c251ed066d367f53b64827cc9e18ccea001b986d08c265e53625dab950')
    version('6.2.1', sha256='bf4e972a88f8841d8fdc6db1a75e0f8d763e66e3754b03006cbc3854d89f1cb1')
    version('6.2.0', sha256='4548236844327a718ce3bb182ab32a16fa2050c61e334e959f554cac052fb0df')
    version('6.0.0', sha256='809c0a2ce9032cbcd7b5313f71af4bdc5c8c771cb86eb7559afd954cab82ebb5')
    version('5.4.1', sha256='5233664eadfa342c639b9b9977190d64ad7aca4edc51a966394d7e08e7f38a9f')
    version('5.1.0', sha256='cee9bc75bff455d317b6947081df0824a8f118de2786dc3d74a3503fd631f4ef')
    version('3.2.0', sha256='64b0a057210c480aea99406c9391180cd866fc0fd8f0b53367e3af21b195784a')
    version('3.0.0', sha256='ad50bef540fe5518a4653c3820452a881b6a042cb0f8bb7657c491c6bd3654bb')
    # Each older release also satisfies the virtual "pil" at that version.
    for ver in [
        '7.2.0', '7.0.0', '6.2.2', '6.2.1', '6.2.0', '6.0.0',
        '5.4.1', '5.1.0', '3.2.0', '3.0.0'
    ]:
        provides('pil@' + ver, when='@' + ver)
3593e7a5f7f3830e4b786b7f4775df1e5c2a79c2 | 1,232 | py | Python | consoletests.py | theflyingfire/random_python_scripts | 6eaf902c983cfdbd50f5c61b3d8a89bb789c9c26 | [
"MIT"
] | null | null | null | consoletests.py | theflyingfire/random_python_scripts | 6eaf902c983cfdbd50f5c61b3d8a89bb789c9c26 | [
"MIT"
] | null | null | null | consoletests.py | theflyingfire/random_python_scripts | 6eaf902c983cfdbd50f5c61b3d8a89bb789c9c26 | [
"MIT"
] | null | null | null | #print("hello", end="")
#print("\b\b\b\b\b\btest")
import time
sleep_time = 0.05  # intended per-frame delay; the sleep calls below are commented out
print("loading... ", end="")
def print_processing_indicator(num):
    """Draw ``num`` cycles of a backspace-driven spinner on stdout.

    Each cycle overwrites the previous character with the frames
    \\ | / - \\ | in order, so the spinner appears to rotate in place.
    """
    frames = "\\|/-\\|"
    for _ in range(num):
        for frame in frames:
            print("\b" + frame, end="")
def reset_line(num):
    """Erase ``num`` characters by emitting that many backspaces."""
    print("\b" * num, end="")
# Demo sequence: spin the indicator, erase the status text, report progress.
print_processing_indicator(20000)
reset_line(13)
print("loading complete ✔")
print("starting server... ", end="")
print_processing_indicator(20000)
reset_line(21)
print("server up and running ✔")
print("generating data: ", end="")
for i in range(100):
    print(".", end="")
    time.sleep(0.1)  # ~10 s total; purely cosmetic pacing
reset_line(120)
print("data generation complete ✔ \n")
input("")  # keep the console window open until the user presses Enter
aaa94d58ef441f09bf64a392dc25f64fac8c4134 | 3,024 | py | Python | pretend.py | di/pretend | 614a3d5ee6320d319d57193ae585d8a72d1ae315 | [
"BSD-3-Clause"
] | 2 | 2016-08-24T22:20:20.000Z | 2022-01-24T12:15:43.000Z | pretend.py | di/pretend | 614a3d5ee6320d319d57193ae585d8a72d1ae315 | [
"BSD-3-Clause"
] | null | null | null | pretend.py | di/pretend | 614a3d5ee6320d319d57193ae585d8a72d1ae315 | [
"BSD-3-Clause"
] | null | null | null | import functools
import sys
# True on Python 3: selects which version-specific magic methods below
# (next/bool vs div/nonzero) a stub may intercept.
PY3K = sys.version_info >= (3,)
methods = set([
    "__iter__",
    "__len__",
    "__contains__",
    "__getitem__",
    "__setitem__",
    "__delitem__",
    "__enter__",
    "__exit__",
    "__lt__",
    "__le__",
    "__eq__",
    "__ne__",
    "__gt__",
    "__ge__",
    "__add__",
    "__and__",
    "__divmod__",
    "__floordiv__",
    "__lshift__",
    "__mod__",
    "__mul__",
    "__or__",
    "__pow__",
    "__rshift__",
    "__sub__",
    "__truediv__",
    "__xor__",
    "__repr__",
])
if PY3K:
    methods.add("__next__")
    methods.add("__bool__")
else:
    methods.add("__div__")
    methods.add("__nonzero__")
# Immutable catalogue of magic methods a stub() may dispatch; frozenset so it
# can serve as a dict key in stub._classes_cache.
MAGIC_METHODS = frozenset(methods)
del methods  # only the frozen set is part of the module's surface
def _build_magic_dispatcher(method):
def inner(self, *args, **kwargs):
return self.__dict__[method](*args, **kwargs)
inner.__name__ = method
return inner
class stub(object):
    # Cache of dynamically created stub subclasses, keyed by the frozenset of
    # magic-method names found in the constructor kwargs.  Needed because
    # Python looks magic methods up on the type, not on the instance.
    _classes_cache = {}
    def __new__(cls, **kwargs):
        magic_methods_present = MAGIC_METHODS.intersection(kwargs)
        if magic_methods_present not in cls._classes_cache:
            # Build a subclass whose magic methods forward to the instance
            # __dict__ entries (see _build_magic_dispatcher).
            attrs = dict(
                (method, _build_magic_dispatcher(method))
                for method in magic_methods_present
            )
            attrs["__module__"] = cls.__module__
            cls._classes_cache[magic_methods_present] = (
                type("stub", (cls,), attrs)
            )
        new_cls = cls._classes_cache[magic_methods_present]
        return super(stub, new_cls).__new__(new_cls)
    def __init__(self, **kwargs):
        # Every kwarg becomes an instance attribute (magic methods included).
        self.__dict__.update(kwargs)
    def __repr__(self):
        return '<stub(%s)>' % ', '.join([
            '%s=%r' % (key, val)
            for key, val in self.__dict__.items()
        ])
def raiser(exc):
    """Return a callable that raises *exc* whenever it is invoked.

    *exc* must be an exception instance or an exception class; anything
    else is rejected up front with a TypeError.
    """
    is_instance = isinstance(exc, BaseException)
    is_class = isinstance(exc, type) and issubclass(exc, BaseException)
    if not (is_instance or is_class):
        raise TypeError("exc must be either an exception instance or class.")

    def raise_it(*args, **kwargs):
        raise exc
    return raise_it
class call(object):
    """Immutable-ish record of one invocation's positional and keyword args,
    comparable and hashable so recorded calls can be asserted against."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, other):
        if not isinstance(other, call):
            return NotImplemented
        return (self.args, self.kwargs) == (other.args, other.kwargs)

    def __ne__(self, other):
        is_equal = self == other
        return not is_equal

    def __hash__(self):
        frozen_kwargs = frozenset(self.kwargs.items())
        return hash((self.args, frozen_kwargs))

    def __repr__(self):
        parts = [repr(arg) for arg in self.args]
        parts += ["%s=%r" % (key, val) for key, val in self.kwargs.items()]
        return "<call(%s)>" % ", ".join(parts)
def call_recorder(func):
    """Wrap *func* so every invocation is appended to ``wrapper.calls``
    as a ``call`` record before delegating to the original function."""
    @functools.wraps(func)
    def recorder(*args, **kwargs):
        recorder.calls.append(call(*args, **kwargs))
        return func(*args, **kwargs)
    recorder.calls = []
    return recorder
| 22.736842 | 77 | 0.570437 |
3396d4fe6cee8cda4077cb07fa66505e0178bc41 | 2,612 | py | Python | 6_binary_search_tree.py | inidal/break | 225b12db51b06b4b5e8314f6016ba8824e7faa01 | [
"MIT"
] | null | null | null | 6_binary_search_tree.py | inidal/break | 225b12db51b06b4b5e8314f6016ba8824e7faa01 | [
"MIT"
] | null | null | null | 6_binary_search_tree.py | inidal/break | 225b12db51b06b4b5e8314f6016ba8824e7faa01 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, value):
self.left = None
self.right = None
self.value = value
class BinarySearchTree:
    """Unbalanced binary search tree over comparable values.

    ``insert`` reports (and skips) duplicates; ``lookup`` prints the
    left/right path from the root to the value.
    """

    def __init__(self):
        # Root Node; None while the tree is empty.
        self.root = None

    def insert(self, value):
        """Insert ``value``; a duplicate is reported and left alone."""
        if self.root is None:
            self.root = Node(value)
            return
        tmp = self.root  # current node
        while tmp.value != value:
            if value < tmp.value:
                # Smaller values go left.
                if tmp.left is None:
                    tmp.left = Node(value)
                    return
                tmp = tmp.left
            else:
                # Larger values go right (loop guard rules out equality).
                if tmp.right is None:
                    tmp.right = Node(value)
                    return
                tmp = tmp.right
        # The loop only falls through when an equal value was reached.
        print(f"Value {value} already exists in the tree.")

    def lookup(self, value):
        """Search for ``value`` and print the path taken to reach it.

        Fix: the original dereferenced ``self.root.value`` unconditionally
        and crashed with AttributeError on an empty tree.
        """
        if self.root is None:
            print("Sorry, value not found.")
            return
        tmp = self.root  # current node
        road = []  # sequence of 'left'/'right' steps taken
        while tmp.value != value:
            if value < tmp.value:
                if tmp.left is None:
                    break  # dead end: value is absent
                tmp = tmp.left
                road.append('left')
            elif value > tmp.value:
                if tmp.right is None:
                    break  # dead end: value is absent
                tmp = tmp.right
                road.append('right')
        if value == tmp.value:
            print(f"Value {value} do exists in the tree.")
            print(f"How to get there? You only need to go {road if len(road) > 0 else 'to the root'}.")
        else:
            print("Sorry, value not found.")
# Demo: build a tree from a fixed sample and trace the path to 85.
t = BinarySearchTree()
lst = [45, 77, 50, 12, 7, 29, 26, 6, 85]
for i in lst:
    t.insert(i)
t.lookup(85)
| 28.703297 | 103 | 0.437213 |
6ba170a327e54572409cc3c0cca9d2d1516a6166 | 1,509 | py | Python | libraries/botbuilder-azure/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | null | null | null | libraries/botbuilder-azure/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | null | null | null | libraries/botbuilder-azure/setup.py | PrettyWood/botbuilder-python | ab79f6b60066b05a00f729d6cb1d8bee30a786e2 | [
"MIT"
] | 1 | 2022-02-24T10:23:28.000Z | 2022-02-24T10:23:28.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
# Runtime dependencies (pinned to the matching SDK release).
REQUIRES = [
    "azure-cosmos==3.2.0",
    "azure-storage-blob==2.1.0",
    "botbuilder-schema==4.11.0",
    "botframework-connector==4.11.2",
    "jsonpickle",
]
TEST_REQUIRES = ["aiounittest==1.3.0"]

root = os.path.abspath(os.path.dirname(__file__))

# Pull package metadata (__title__, __version__, ...) from about.py by
# exec-ing it into a dict instead of importing the package.
with open(os.path.join(root, "botbuilder", "azure", "about.py")) as f:
    package_info = {}
    info = f.read()
    exec(info, package_info)

with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name=package_info["__title__"],
    version=package_info["__version__"],
    url=package_info["__uri__"],
    author=package_info["__author__"],
    description=package_info["__description__"],
    keywords=["BotBuilderAzure", "bots", "ai", "botframework", "botbuilder", "azure"],
    long_description=long_description,
    long_description_content_type="text/x-rst",
    license=package_info["__license__"],
    packages=["botbuilder.azure"],
    # NOTE(review): test deps are folded into install_requires here -- looks
    # intentional but confirm; usually they would stay in tests_require only.
    install_requires=REQUIRES + TEST_REQUIRES,
    tests_require=TEST_REQUIRES,
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 5 - Production/Stable",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
| 31.4375 | 86 | 0.671306 |
a2d63b0105e5662fd8f9e2cc2b23b450966c468d | 11,856 | py | Python | scripts/FPS2ARB.py | Ecotrust/ARBcarbon | 110f0d009fa6bb57d3667ebe6d414adbed2e87ab | [
"BSD-3-Clause"
] | 5 | 2020-02-28T12:58:26.000Z | 2021-09-15T06:15:34.000Z | scripts/FPS2ARB.py | Ecotrust/ARBcarbon | 110f0d009fa6bb57d3667ebe6d414adbed2e87ab | [
"BSD-3-Clause"
] | 24 | 2016-05-13T17:55:22.000Z | 2021-11-09T15:18:56.000Z | scripts/FPS2ARB.py | Ecotrust/ARBcarbon | 110f0d009fa6bb57d3667ebe6d414adbed2e87ab | [
"BSD-3-Clause"
] | 4 | 2016-09-08T21:10:15.000Z | 2020-02-28T12:58:30.000Z | """
FPS2ARB.
FPS-to-ARB Carbon Calculation.
Takes two CSV files in current working directory that were exported from FPS
(Forest Planning and Projection System) database containing forest inventory
data, calculates carbon storage for each tree, and documents the calculation
parameters and writes outputs to a new CSV file, one for each property
detected in the FPS_ADMIN table/CSV.
Usage:
FPS2ARB.py [options]
FPS2ARB.py [-h | --help]
FPS2ARB.py [--version]
Options:
-h --help Show this screen
--version Show version
--property <property> Name of property to include
--year <year> Year for calculations to be made
--region <region> Region for equations (WOR, EOR, WWA, EWA, CA)
"""
import os
from docopt import docopt
import pandas as pd
import math
import time
from ARB_Volume_Equations import *
from ARB_Biomass_Equations import *
from ARB_Equation_Assignments import *
if __name__ == "__main__":
    # Parse command-line options (usage pattern in the module docstring).
    # NOTE(review): `doc` is not defined in this module; docopt is
    # conventionally called with the module docstring, i.e. docopt(__doc__).
    # As written this raises NameError -- confirm against the original file.
    args = docopt(doc, version='1.0')
    properties_to_run = args['--property']
    report_yr = args['--year']
    region = args['--region']

    # Read in the CSV files that were exported from FPS
    try:
        FPS_DBHCLS = pd.read_csv('DBHCLS.csv')
        FPS_ADMIN = pd.read_csv('ADMIN.csv')
        print "Successfully read in DBHCLS and ADMIN tables.\n"
    except IOError:
        # NOTE(review): after this warning the script continues and the next
        # statements will fail with NameError since the frames were never
        # bound; consider exiting here.
        print "Could not find your DBHCLS and ADMIN CSV files. Please export them from your FPS database in to the same folder as this script.\n"

    # stand_list, a dataframe of all stands in the ADMIN table
    stand_list = FPS_ADMIN[['STD_ID', 'RPT_YR', 'MSMT_YR', 'Property', 'AREA_GIS', 'AREA_RPT']]
    # tree_list, a dataframe of all the trees in the DBHCLS table
    tree_list = FPS_DBHCLS[['RPT_YR', 'STD_ID', 'PlotTree', 'GRP', 'SPECIES', 'TREES', 'DBH', 'HEIGHT']]
    # add Property Name and GIS_Area to tree_list
    tree_list = tree_list.merge(stand_list[['STD_ID', 'AREA_GIS', 'AREA_RPT', 'Property']], on='STD_ID')

    # report_yr = None
    # properties_to_run = None
    # region = None

    # Prompt user to specify a single property
    # NOTE(review): when --property is given on the command line,
    # properties_to_run is a single string; the .isin()/for-loop code below
    # expects a list -- confirm docopt wiring.
    all_properties = pd.unique(stand_list['Property']).tolist()
    if not properties_to_run:
        print str(len(all_properties)) + ' properties found in the ADMIN table:',
        print ', '.join(str(prop) for prop in all_properties) + "\n"
        while True:
            chosen_prop = raw_input('Choose a property to run, or type ALL: ')
            if chosen_prop.lower() == 'all':
                properties_to_run = all_properties
                print 'Running carbon calculations for all properties.\n'
                break
            elif chosen_prop in all_properties:
                properties_to_run = [chosen_prop]
                print 'Running carbon calculations for ' + properties_to_run[0] + '\n'
                break
            else:
                print 'Property not recognized. Try again.\n'

    # Prompt user to specify a region
    if not region:
        while True:
            region = raw_input('Choose which regional volume equations to use (WOR, EOR, WWA, EWA, or CA): ')
            if region in ['WOR', 'EOR', 'WWA', 'EWA', 'CA']:
                print 'All calculations to be done using ' + region + ' equations.\n'
                break
            else:
                print 'Region not recognized. Try again.\n'

    # Prompt user to specify a single report year
    all_years = sorted(pd.unique(tree_list['RPT_YR']).tolist())
    if not report_yr:
        while True:
            report_yr = raw_input('Choose a year to run (RPT_YR from DBHCLS table), or type ALL: ')
            if report_yr.lower() == 'all':
                report_yr = all_years
                print 'Running all years.\n'
                break
            elif int(report_yr) in all_years:
                report_yr = [int(report_yr)]
                print 'Running calculations for ' + str(report_yr[0]) + ' only.\n'
                break
            else:
                print report_yr + ' not found in DBHCLS table. Try again using one of these:'
                print ', '.join(str(yr) for yr in all_years) + '\n'

    # check if all species are recognized from user's crosswalk table
    DBHCLS_spp = pd.unique(FPS_DBHCLS.SPECIES) # the species found in the FPS Database
    spp_used_list = species_used.Your_species_code.tolist() # species found in the user's crosswalk table
    print "Found " + str(len(species_used)) + " species in the species crosswalk spreadsheet and " + str(len(DBHCLS_spp)) + " species in the FPS DBHCLS table.\n"
    # if not, list the species that are not recognized
    missing_spp = [spp for spp in DBHCLS_spp if spp not in spp_used_list] # species_used comes from crosswalk table, in ARB_Equation_Assignments script
    if len(missing_spp) >0:
        print str(len(missing_spp)) + " species found in the FPS DBHCLS table but missing from the species crosswalk spreadsheet will not have carbon storage calculated:"
        print "(" + ', '.join(str(spp) for spp in missing_spp) + ")\n"
    else:
        print "All species will have carbon calculations.\n"

    # hold out RPT_YR years that were not requested by user
    tree_list = tree_list.loc[tree_list['RPT_YR'].isin(report_yr)] # only include trees from that year
    # hold out trees from any properties not requested by user
    stands_in_properties_to_run = pd.unique(stand_list['STD_ID'].loc[stand_list['Property'].isin(properties_to_run)]).tolist()
    tree_list = tree_list.loc[tree_list['STD_ID'].isin(stands_in_properties_to_run)]
    # hold out any trees that were not in species crosswalk spreadsheet
    # NOTE(review): `missing_trees` is only assigned inside this branch but is
    # referenced unconditionally by the append() call below; when no species
    # are missing this raises NameError. Consider initializing it to an empty
    # DataFrame before the branch.
    if len(missing_spp) >0:
        missing_trees = tree_list.loc[tree_list['SPECIES'].isin(missing_spp)]
        tree_list = tree_list.loc[~tree_list['SPECIES'].isin(missing_spp)]
    # hold out any trees that are not living, based on a GRP code
    live_trees = ['..', '.R', '.I', '.L', '.W'] # codes for live, residual, ingrowth, leave, and wildlife trees
    dead_trees = tree_list.loc[~tree_list['GRP'].isin(live_trees)] # trees with codes other than live_trees
    tree_list = tree_list.loc[tree_list['GRP'].isin(live_trees)] # trees only with recognized live_trees codes

    # add new columns to the tree_list for individual trees:
    # add the FIA region being used
    tree_list['FIA_Region'] = region
    # record the ARB Volume Equation Number to be used for each tree
    tree_list['Vol_Eq'] = tree_list['SPECIES'].apply(lambda x: getattr(species_classes[x], region+'_VOL').__name__.split('_')[1])
    # species_classe is a dictionary from ARB_Equation_Assignments.py
    # species_classes contains class objects with attributes for each species such as the volume and biomass equation numbers, etc.

    # calculate Total Cubic Volume (CVTS, cubic volume including top and stump) for each tree
    def get_vol(row):
        # Look up the regional volume class for the row's species and
        # evaluate it with the tree's DBH and height.
        return getattr(species_classes[row.SPECIES], region+'_VOL')().calc(row.DBH, row.HEIGHT, 'CVTS')
    tree_list['CVTS_ft3'] = tree_list.apply(get_vol, axis = 1) # calculate cubic volume for each row

    # calculate boardfoot volume for each tree
    def get_BF(row):
        # Scribner rule variant depends on hardwood/softwood and region.
        if getattr(species_classes[row.SPECIES], 'wood_type') == 'HW':
            return getattr(species_classes[row.SPECIES], region+'_VOL')().calc(row.DBH, row.HEIGHT, 'SV816')
        elif getattr(species_classes[row.SPECIES], 'wood_type') == 'SW' and region in ['WWA', 'WOR']:
            return getattr(species_classes[row.SPECIES], region+'_VOL')().calc(row.DBH, row.HEIGHT, 'SV632')
        elif getattr(species_classes[row.SPECIES], 'wood_type') == 'SW' and region in ['EWA', 'EOR', 'CA']:
            return getattr(species_classes[row.SPECIES], region+'_VOL')().calc(row.DBH, row.HEIGHT, 'SV616')
    tree_list['Scrib_BF'] = tree_list.apply(get_BF, axis = 1) # calculate scribner volume for each row

    # Wood Density and Stem Biomass, density in units of lbs/ft3 and cubic volume in ft3
    tree_list['Wood_density_lbs_ft3'] = tree_list['SPECIES'].apply(lambda x: getattr(species_classes[x], 'wood_dens'))
    tree_list['Stem_biomass_UStons'] = (tree_list['CVTS_ft3'] * tree_list['Wood_density_lbs_ft3'])/2000.0
    tree_list['Stem_biomass_kg'] = (tree_list['CVTS_ft3'] * tree_list['Wood_density_lbs_ft3'])*0.453592

    # Bark biomass equation and calculation
    # NOTE(review): `.func_name` is the Python 2 spelling; the Vol_Eq line
    # above uses `.__name__` -- mixed spellings suggest a partial py3 port.
    tree_list['BarkBio_Eq'] = tree_list['SPECIES'].apply(lambda x: getattr(species_classes[x], region+'_BB').func_name.split('_')[1])
    def get_bark_bio(row): # convert DBH and HT from English to Metric units
        # equations use metric units, so convert DBH and HT from English to Metric units
        # equations return units of kg
        return check_BB(row.DBH*2.54, row.HEIGHT*0.3048, row.Wood_density_lbs_ft3, getattr(species_classes[row.SPECIES], region+'_BB'))
    tree_list['Bark_biomass_kg'] = tree_list.apply(get_bark_bio, axis = 1)

    # Branch biomass equation and calculation
    tree_list['BranchBio_Eq'] = tree_list['SPECIES'].apply(lambda x: getattr(species_classes[x], region+'_BLB').func_name.split('_')[1])
    def get_branch_bio(row):
        # equations use metric units, so convert DBH and HT from English to Metric units
        # equations return units of kg
        return check_BLB(row.DBH*2.54, row.HEIGHT*0.3048, getattr(species_classes[row.SPECIES], region+'_BLB'))
    tree_list['Branch_biomass_kg'] = tree_list.apply(get_branch_bio, axis = 1)

    # Above-ground biomass
    tree_list['Aboveground_biomass_kg'] = tree_list['Stem_biomass_kg'] + tree_list['Bark_biomass_kg'] + tree_list['Branch_biomass_kg']
    # Below-ground biomass, calculated using Cairns et al. (1997) Equation #1
    tree_list['Belowground_biomass_kg'] = tree_list['Aboveground_biomass_kg'].apply(cairns)

    # Live CO2e for each tree (kg biomass -> tonnes C -> tonnes CO2e)
    tree_list['AbovegroundLive_tCO2e'] = tree_list['Aboveground_biomass_kg'] / 1000.0 * 0.5 * 44.0/12.0
    tree_list['BelowgroundLive_tCO2e'] = tree_list['Belowground_biomass_kg'] / 1000.0 * 0.5 * 44.0/12.0
    tree_list['LiveTree_carbon_tCO2e'] = tree_list['AbovegroundLive_tCO2e'] + tree_list['BelowgroundLive_tCO2e']
    # Live tree carbon per acre (TREES = trees-per-acre expansion factor)
    tree_list['AbovegroundLive_tCO2e_ac'] = tree_list['AbovegroundLive_tCO2e'] * tree_list['TREES']
    tree_list['BelowgroundLive_tCO2e_ac'] = tree_list['BelowgroundLive_tCO2e'] * tree_list['TREES']
    tree_list['LiveTree_carbon_tCO2e_ac'] = tree_list['LiveTree_carbon_tCO2e'] * tree_list['TREES']
    # Total carbon across property
    tree_list['LiveTree_carbon_tCO2e_total_AreaGIS'] = tree_list['LiveTree_carbon_tCO2e_ac'] * tree_list['AREA_GIS']
    tree_list['LiveTree_carbon_tCO2e_total_AreaRPT'] = tree_list['LiveTree_carbon_tCO2e_ac'] * tree_list['AREA_RPT']

    # add back in unrecognized species and dead_trees
    tree_list = tree_list.append([missing_trees, dead_trees], ignore_index=True)
    # sort the tree_list
    tree_list = tree_list.sort_values(by = ['Property', 'RPT_YR', 'STD_ID', 'PlotTree'])

    # column order to use for CSV output
    # NOTE(review): 'LiveTree_biomass_kg' is listed here but never created
    # above; pandas to_csv(columns=...) raises KeyError on missing columns.
    cols = ['Property', 'RPT_YR', 'STD_ID', 'AREA_GIS', 'AREA_RPT', 'PlotTree', 'GRP', 'SPECIES', 'DBH', 'HEIGHT',
            'TREES', 'FIA_Region', 'Vol_Eq', 'BarkBio_Eq', 'BranchBio_Eq', 'CVTS_ft3', 'Scrib_BF',
            'Wood_density_lbs_ft3', 'Stem_biomass_UStons', 'Stem_biomass_kg', 'Bark_biomass_kg',
            'Branch_biomass_kg', 'Aboveground_biomass_kg', 'Belowground_biomass_kg', 'LiveTree_biomass_kg',
            'AbovegroundLive_tCO2e', 'BelowgroundLive_tCO2e', 'LiveTree_carbon_tCO2e',
            'AbovegroundLive_tCO2e_ac', 'BelowgroundLive_tCO2e_ac', 'LiveTree_carbon_tCO2e_ac',
            'LiveTree_carbon_tCO2e_total_AreaGIS', 'LiveTree_carbon_tCO2e_total_AreaRPT']

    # write a separate CSV for each property in dataframe:
    if not os.path.exists('FPS2ARB_Outputs'):
        os.makedirs('FPS2ARB_Outputs')
    num_files = 0
    for prop in properties_to_run:
        tree_list.loc[tree_list['Property'] == prop].to_csv(os.getcwd() + '/FPS2ARB_Outputs/' + 'FPS2ARB_' + prop + '_' + time.strftime('%Y-%m-%d') + '.csv', columns = cols, index = False)
        num_files += 1
    print 'FPS2ARB calculations completed. \n' + str(num_files) + ' CSV file(s) successfully written to ' + os.getcwd() + '\FPS2ARB_Outputs \n'
8bac06e0e9a31fbf3d73ca7b63c6a5687ad2c9a3 | 133 | py | Python | examples/multiple_dirs.py | pushkarkadam/autodirs | 0ca5d7fcac6390c19c122e347ffa235b5e9cdb78 | [
"MIT"
] | null | null | null | examples/multiple_dirs.py | pushkarkadam/autodirs | 0ca5d7fcac6390c19c122e347ffa235b5e9cdb78 | [
"MIT"
] | null | null | null | examples/multiple_dirs.py | pushkarkadam/autodirs | 0ca5d7fcac6390c19c122e347ffa235b5e9cdb78 | [
"MIT"
] | null | null | null | import sys
# Make the package in the parent directory importable when this example is
# run from inside examples/.
sys.path.append('../')
from autodirs import directories as di

# Build directories from the contents of "text_files" under sub/hogwarts.
# NOTE(review): exact semantics depend on autodirs.directories, which is not
# visible here -- confirm against that module's documentation.
di.group_by_text_files("text_files", path="sub/hogwarts")
| 19 | 57 | 0.759398 |
a44842a4f0d0203b5fdf19a898c5ebeb2d0b1d11 | 1,841 | py | Python | iper/urls.py | edumorris/iper | c3dbb4127529e0b4f82a16c845ba3bc9b6646a35 | [
"MIT"
] | null | null | null | iper/urls.py | edumorris/iper | c3dbb4127529e0b4f82a16c845ba3bc9b6646a35 | [
"MIT"
] | null | null | null | iper/urls.py | edumorris/iper | c3dbb4127529e0b4f82a16c845ba3bc9b6646a35 | [
"MIT"
] | null | null | null | """iper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from django_registration.backends.one_step.urls import views as v
from django.contrib.auth import views, logout as auth_logout, login as auth_login
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
from django.conf import settings
from django.views.static import serve
# RestAPI authentication
from rest_framework.authtoken.views import obtain_auth_token
# URL routing table. Django serves the first pattern that matches, so order
# matters (e.g. the catch-all r'' app routes sit above the accounts routes).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Project app routes mounted at the site root.
    url(r'', include('projectawards.urls')),
    # Accounts
    url(r'^accounts/', include('django_registration.backends.one_step.urls')),
    # url(r'^accounts/login/$', v.login, {"next_page": '/'}),
    url(r'^accounts/', include('django.contrib.auth.urls')),
    # Favicon
    path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('img/favicon.ico'))),
    # Static fixes
    # NOTE(review): serving MEDIA/STATIC via django.views.static.serve is a
    # development-only convenience; production should use the web server.
    url(r'^media/(?P<path>.*)$', serve,{'document_root': settings.MEDIA_ROOT}),
    url(r'^static/(?P<path>.*)$', serve,{'document_root': settings.STATIC_ROOT}),
    # RestAPI Authentication
    url(r'^api-token-auth/', obtain_auth_token)
]
| 42.813953 | 94 | 0.726779 |
03328a3ee79d9d1cb28a74be8146fb39da4b945d | 1,587 | py | Python | interview_challenges/landg/censor_serial.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | 1 | 2015-04-28T20:36:57.000Z | 2015-04-28T20:36:57.000Z | interview_challenges/landg/censor_serial.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | 2 | 2021-02-03T21:05:54.000Z | 2021-03-23T09:25:43.000Z | interview_challenges/landg/censor_serial.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | null | null | null | import argparse
import io
from typing import Iterator, TextIO
"""Censor a document of words passed in a separate text file.
Pass a file path for words to be removed and a second path which
is to be censored.
Usage:
python censor.py banned_words.txt prose.txt
"""
def censor_line(banned_words: set[str], line: str) -> Iterator[str]:
    """Yield the words of *line*, masking banned words with asterisks.

    Each banned word is replaced by a run of ``*`` of the same length; other
    words pass through unchanged. Words after the first are prefixed with a
    single space so ``"".join(...)`` reassembles the line single-spaced.

    Args:
        banned_words: Exact (case-sensitive) words to be masked.
        line: String of text to be censored.

    Yields:
        One possibly-masked word at a time, space-prefixed after the first.
    """
    # Fix: the original annotated banned_words as `{str}`, which is a runtime
    # set literal containing the `str` type, not a type hint.
    for index, word in enumerate(line.split()):
        masked = "*" * len(word) if word in banned_words else word
        yield masked if index == 0 else " " + masked
def run(censor_file: TextIO, prose_file: TextIO):
    """Print *prose_file* to stdout with banned words masked out.

    Args:
        censor_file: File with one banned word per line.
        prose_file: File whose text is to be censored.
    """
    # Strip the trailing newline from each banned-word entry.
    banned = {entry.replace("\n", "") for entry in censor_file.readlines()}
    for index, line in enumerate(prose_file):
        if index != 0:
            print("")
        for token in censor_line(banned, line):
            print(token, end="")
if __name__ == "__main__":
    # Command-line entry point: censor `body_of_text` using `censored_words`.
    parser = argparse.ArgumentParser(
        description="Obscure censored words from a body of text"
    )
    parser.add_argument(
        "censored_words", type=str, help="Text file containing censored words"
    )
    parser.add_argument("body_of_text", type=str, help="Text file to be censored")
    args = parser.parse_args()
    # Fix: open both files via `with` so they are closed even if run()
    # raises; the original passed bare open() handles and leaked them.
    with open(args.censored_words) as censor_file, \
            open(args.body_of_text) as prose_file:
        run(censor_file, prose_file)
| 28.339286 | 82 | 0.650914 |
5df5e6d8838261350d301d370571c136bc697d38 | 2,061 | py | Python | tools/shaders-gen.py | andoma/rainbow | ed1e70033217450457e52b90276e41a746d5086a | [
"MIT"
] | null | null | null | tools/shaders-gen.py | andoma/rainbow | ed1e70033217450457e52b90276e41a746d5086a | [
"MIT"
] | null | null | null | tools/shaders-gen.py | andoma/rainbow | ed1e70033217450457e52b90276e41a746d5086a | [
"MIT"
] | 1 | 2018-09-20T19:34:36.000Z | 2018-09-20T19:34:36.000Z | #!/usr/bin/python
# Copyright (c) 2010-present Bifrost Entertainment AS and Tommy Nguyen
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
from datetime import date
import os
import re
import sys
# Directory holding the .fsh/.vsh shader sources, relative to this script.
shaders_dir = os.path.join(sys.path[0], "..", "src", "Graphics", "Shaders")

RE_COMMENT = re.compile(r'^\s*?//')  # line whose first content is a // comment
RE_INDENT = re.compile(r'^(\s+)')    # leading whitespace (captured)
RE_WHITESPACE = re.compile(r'^\s+$')  # whitespace-only line
def main(argv):
    """Embed every .fsh/.vsh shader under `shaders_dir` into Shaders.h.

    Each shader file becomes a C++11 raw string literal named after the file
    ("Diffuse.fsh" -> kDiffusef), with whole-line comments, trailing comments
    and leading blank lines stripped.

    Args:
        argv: Process arguments; argv[0] is echoed into the generated header.

    Returns:
        0 on success.
    """
    shaders_h = []
    for dirname, _, filenames in os.walk(shaders_dir):
        for filename in filenames:
            # Tuple form replaces the original pair of endswith() checks.
            if not filename.endswith(("fsh", "vsh")):
                continue
            # "Name.fsh" -> variable name "kNamef"; "Name.vsh" -> "kNamev".
            shader = [re.sub(r'\.([fv])sh', r'\1',
                             "const char k" + filename + "[] =\nR\"(")]
            # Fix: use `with` so the shader file is closed even on error
            # (original called f.close() outside any try/finally). Also
            # removed the unused `level` local.
            with open(os.path.join(dirname, filename), 'r') as f:
                for line in f:
                    if RE_COMMENT.search(line):
                        continue  # drop whole-line comments
                    elif RE_WHITESPACE.match(line):
                        if len(shader) == 1:
                            continue  # skip blank lines before any content
                        shader.append("")
                    else:
                        # Strip a trailing // comment but keep indentation.
                        start_comment = line.find("//")
                        if start_comment >= 0:
                            line = line[:start_comment]
                        line = line.rstrip()
                        prefix = ""
                        m = RE_INDENT.search(line)
                        if m:
                            prefix += m.group(1)
                        shader.append(prefix + line.lstrip())
            shaders_h.append("\n".join(shader) + "\n)\";\n")
    with open(os.path.join(shaders_dir, "Shaders.h"), 'w') as f:
        f.write("// This file was auto-generated with '" + argv[0] + "'.\n")
        f.write("// Copyright (c) " + str(date.today().year) +
                " Bifrost Entertainment AS and Tommy Nguyen.\n" +
                "// Distributed under the MIT License.\n\n")
        f.write("\n".join(shaders_h))
    return 0


if __name__ == "__main__":
    main(sys.argv)
| 36.803571 | 108 | 0.518195 |
634eba1cbbec299e8ecb7785377c6bedf03bf674 | 27,054 | py | Python | magenta/models/score2perf/datagen_beam.py | Ali-Tahir/magenta | f2e6c55a841e7b60462606dced553f335771df1f | [
"Apache-2.0"
] | null | null | null | magenta/models/score2perf/datagen_beam.py | Ali-Tahir/magenta | f2e6c55a841e7b60462606dced553f335771df1f | [
"Apache-2.0"
] | null | null | null | magenta/models/score2perf/datagen_beam.py | Ali-Tahir/magenta | f2e6c55a841e7b60462606dced553f335771df1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam pipeline to generate examples for a Score2Perf dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import hashlib
import logging
import os
import random
import apache_beam as beam
from apache_beam import typehints
from apache_beam.metrics import Metrics
from magenta.music import chord_inference
from magenta.music import melody_inference
from magenta.music import sequences_lib
from magenta.protobuf import music_pb2
import numpy as np
from tensor2tensor.data_generators import generator_utils
import tensorflow as tf
import typing
# TODO(iansimon): this should probably be defined in the problem
# Tempo used when rectifying score timing to a fixed beat grid.
SCORE_BPM = 120.0

# Shortcut to beat annotation.
BEAT = music_pb2.NoteSequence.TextAnnotation.BEAT

FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags

# Comma-separated flags forwarded verbatim to Beam's PipelineOptions.
flags.DEFINE_string(
    'pipeline_options', '',
    'Command line flags to use in constructing the Beam pipeline options.')
# TODO(iansimon): Figure out how to avoid explicitly serializing and
# deserializing NoteSequence protos.
@typehints.with_output_types(typing.Tuple[str, str])
class ReadNoteSequencesFromTFRecord(beam.PTransform):
  """Beam PTransform that reads NoteSequence protos from TFRecord.

  Emits `(NoteSequence id, serialized NoteSequence)` tuples.
  """

  def __init__(self, tfrecord_path):
    super(ReadNoteSequencesFromTFRecord, self).__init__()
    self._tfrecord_path = tfrecord_path

  def expand(self, pcoll):
    # ReadAllFromTFRecord is awkward here compared to ReadFromTFRecord, but
    # for some reason ReadFromTFRecord doesn't work with gs:// URLs.
    paths = pcoll | beam.Create([self._tfrecord_path])
    records = paths | beam.io.tfrecordio.ReadAllFromTFRecord()
    return records | beam.Map(
        lambda ns_str: (music_pb2.NoteSequence.FromString(ns_str).id, ns_str))
def select_split(cumulative_splits, kv, unused_num_partitions):
  """Select a split for an `(id, _)` tuple using a hash of `id`.

  Args:
    cumulative_splits: List of `(split_name, cumulative_probability)` tuples;
      the final cumulative probability must be 1.0.
    kv: `(id, value)` tuple; only the id is hashed.
    unused_num_partitions: Ignored; required by `beam.Partition`'s signature.

  Returns:
    Index into `cumulative_splits` of the selected split.
  """
  key, _ = kv
  m = hashlib.md5(key.encode('utf-8'))
  # Map the digest to a deterministic float in [0, 1).
  r = int(m.hexdigest(), 16) / (2 ** (8 * m.digest_size))
  for i, (name, p) in enumerate(cumulative_splits):
    if r < p:
      Metrics.counter('select_split', name).inc()
      return i
  # Unreachable when cumulative probabilities reach 1.0 (validated by the
  # caller). Fix: raise explicitly instead of `assert False`, which is
  # stripped under `python -O` and would silently return None.
  raise AssertionError('Cumulative split probabilities do not reach 1.0.')
def filter_invalid_notes(min_pitch, max_pitch, kv):
  """Drop notes with out-of-range pitch from a serialized NoteSequence.

  Args:
    min_pitch: Lowest admissible MIDI pitch (inclusive).
    max_pitch: Highest admissible MIDI pitch (inclusive).
    kv: `(key, serialized NoteSequence)` tuple.

  Returns:
    `(key, serialized NoteSequence)` with out-of-range notes removed.
  """
  key, ns_str = kv
  ns = music_pb2.NoteSequence.FromString(ns_str)
  kept_notes = [note for note in ns.notes
                if min_pitch <= note.pitch <= max_pitch]
  if len(kept_notes) < len(ns.notes):
    # Rebuild the repeated field in place so the proto stays consistent.
    del ns.notes[:]
    ns.notes.extend(kept_notes)
    Metrics.counter('filter_invalid_notes', 'out_of_range_pitch').inc()
  return key, ns.SerializeToString()
class DataAugmentationError(Exception):
  """Raised by augmentation functions on known, recoverable failure."""
class ExtractExamplesDoFn(beam.DoFn):
  """Extracts Score2Perf examples from NoteSequence protos."""

  def __init__(self, min_hop_size_seconds, max_hop_size_seconds,
               num_replications, encode_performance_fn, encode_score_fns,
               augment_fns, absolute_timing, random_crop_length,
               *unused_args, **unused_kwargs):
    """Initialize an ExtractExamplesDoFn.

    If any of the `encode_score_fns` or `encode_performance_fn` returns an empty
    encoding for a particular example, the example will be discarded.

    Args:
      min_hop_size_seconds: Minimum hop size in seconds at which input
          NoteSequence protos can be split.
      max_hop_size_seconds: Maximum hop size in seconds at which input
          NoteSequence protos can be split. If zero or None, will not split at
          all.
      num_replications: Number of times input NoteSequence protos will be
          replicated prior to splitting.
      encode_performance_fn: Performance encoding function. Will be applied to
          the performance NoteSequence and the resulting encoding will be stored
          as 'targets' in each example.
      encode_score_fns: Optional dictionary of named score encoding functions.
          If provided, each function will be applied to the score NoteSequence
          and the resulting encodings will be stored in each example.
      augment_fns: Optional list of data augmentation functions. If provided,
          each function will be applied to each performance NoteSequence (and
          score, when using scores), creating a separate example per
          augmentation function. Should not modify the NoteSequence.
      absolute_timing: If True, each score will use absolute instead of tempo-
          relative timing. Since chord inference depends on having beats, the
          score will only contain melody.
      random_crop_length: If specified, crop each encoded performance
          ('targets') to this length.

    Raises:
      ValueError: If the maximum hop size is less than twice the minimum hop
          size, or if `encode_score_fns` and `random_crop_length` are both
          specified.
    """
    if (max_hop_size_seconds and
        max_hop_size_seconds != min_hop_size_seconds and
        max_hop_size_seconds < 2 * min_hop_size_seconds):
      raise ValueError(
          'Maximum hop size must be at least twice minimum hop size.')
    if encode_score_fns and random_crop_length:
      raise ValueError('Cannot perform random crop when scores are used.')
    super(ExtractExamplesDoFn, self).__init__(*unused_args, **unused_kwargs)
    self._min_hop_size_seconds = min_hop_size_seconds
    self._max_hop_size_seconds = max_hop_size_seconds
    self._num_replications = num_replications
    self._encode_performance_fn = encode_performance_fn
    self._encode_score_fns = encode_score_fns
    # A single identity "augmentation" keeps the encode loop below uniform
    # when no augmentation functions were supplied.
    self._augment_fns = augment_fns if augment_fns else [lambda ns: ns]
    self._absolute_timing = absolute_timing
    self._random_crop_length = random_crop_length

  def process(self, kv):
    """Splits, optionally scores, augments and encodes one NoteSequence.

    Args:
      kv: `(key, serialized NoteSequence)` tuple.

    Yields:
      tf.train.Example protos, one per (split segment, augmentation) pair
      that survives all of the filtering steps below.
    """
    # Seed random number generator based on key so that hop times are
    # deterministic.
    key, ns_str = kv
    m = hashlib.md5(key.encode('utf-8'))
    random.seed(int(m.hexdigest(), 16))

    # Deserialize NoteSequence proto.
    ns = music_pb2.NoteSequence.FromString(ns_str)

    # Apply sustain pedal.
    ns = sequences_lib.apply_sustain_control_changes(ns)

    # Remove control changes as there are potentially a lot of them and they are
    # no longer needed.
    del ns.control_changes[:]

    if (self._min_hop_size_seconds and
        ns.total_time < self._min_hop_size_seconds):
      Metrics.counter('extract_examples', 'sequence_too_short').inc()
      return

    sequences = []
    for _ in range(self._num_replications):
      if self._max_hop_size_seconds:
        if self._max_hop_size_seconds == self._min_hop_size_seconds:
          # Split using fixed hop size.
          sequences += sequences_lib.split_note_sequence(
              ns, self._max_hop_size_seconds)
        else:
          # Sample random hop positions such that each segment size is within
          # the specified range.
          hop_times = [0.0]
          while hop_times[-1] <= ns.total_time - self._min_hop_size_seconds:
            if hop_times[-1] + self._max_hop_size_seconds < ns.total_time:
              # It's important that we get a valid hop size here, since the
              # remainder of the sequence is too long.
              max_offset = min(
                  self._max_hop_size_seconds,
                  ns.total_time - self._min_hop_size_seconds - hop_times[-1])
            else:
              # It's okay if the next hop time is invalid (in which case we'll
              # just stop).
              max_offset = self._max_hop_size_seconds
            offset = random.uniform(self._min_hop_size_seconds, max_offset)
            hop_times.append(hop_times[-1] + offset)
          # Split at the chosen hop times (ignoring zero and the final invalid
          # time).
          sequences += sequences_lib.split_note_sequence(ns, hop_times[1:-1])
      else:
        sequences += [ns]

    for performance_sequence in sequences:
      if self._encode_score_fns:
        # We need to extract a score.
        if not self._absolute_timing:
          # Beats are required to extract a score with metric timing.
          beats = [
              ta for ta in performance_sequence.text_annotations
              if ta.annotation_type == BEAT
              and ta.time <= performance_sequence.total_time
          ]
          if len(beats) < 2:
            Metrics.counter('extract_examples', 'not_enough_beats').inc()
            continue

          # Ensure the sequence starts and ends on a beat.
          performance_sequence = sequences_lib.extract_subsequence(
              performance_sequence,
              start_time=min(beat.time for beat in beats),
              end_time=max(beat.time for beat in beats)
          )

          # Infer beat-aligned chords (only for relative timing).
          try:
            chord_inference.infer_chords_for_sequence(
                performance_sequence,
                chord_change_prob=0.25,
                chord_note_concentration=50.0,
                add_key_signatures=True)
          except chord_inference.ChordInferenceError:
            Metrics.counter('extract_examples', 'chord_inference_failed').inc()
            continue

        # Infer melody regardless of relative/absolute timing.
        try:
          melody_instrument = melody_inference.infer_melody_for_sequence(
              performance_sequence,
              melody_interval_scale=2.0,
              rest_prob=0.1,
              instantaneous_non_max_pitch_prob=1e-15,
              instantaneous_non_empty_rest_prob=0.0,
              instantaneous_missing_pitch_prob=1e-15)
        except melody_inference.MelodyInferenceError:
          Metrics.counter('extract_examples', 'melody_inference_failed').inc()
          continue

        if not self._absolute_timing:
          # Now rectify detected beats to occur at fixed tempo.
          # TODO(iansimon): also include the alignment
          score_sequence, unused_alignment = sequences_lib.rectify_beats(
              performance_sequence, beats_per_minute=SCORE_BPM)
        else:
          # Score uses same timing as performance.
          score_sequence = copy.deepcopy(performance_sequence)

        # Remove melody notes from performance.
        performance_notes = []
        for note in performance_sequence.notes:
          if note.instrument != melody_instrument:
            performance_notes.append(note)
        del performance_sequence.notes[:]
        performance_sequence.notes.extend(performance_notes)

        # Remove non-melody notes from score.
        score_notes = []
        for note in score_sequence.notes:
          if note.instrument == melody_instrument:
            score_notes.append(note)
        del score_sequence.notes[:]
        score_sequence.notes.extend(score_notes)

        # Remove key signatures and beat/chord annotations from performance.
        del performance_sequence.key_signatures[:]
        del performance_sequence.text_annotations[:]

        Metrics.counter('extract_examples', 'extracted_score').inc()

      for augment_fn in self._augment_fns:
        # Augment and encode the performance.
        try:
          augmented_performance_sequence = augment_fn(performance_sequence)
        except DataAugmentationError:
          Metrics.counter(
              'extract_examples', 'augment_performance_failed').inc()
          continue
        example_dict = {
            'targets': self._encode_performance_fn(
                augmented_performance_sequence)
        }
        if not example_dict['targets']:
          Metrics.counter('extract_examples', 'skipped_empty_targets').inc()
          continue

        if (self._random_crop_length and
            len(example_dict['targets']) > self._random_crop_length):
          # Take a random crop of the encoded performance.
          max_offset = len(example_dict['targets']) - self._random_crop_length
          offset = random.randrange(max_offset + 1)
          example_dict['targets'] = example_dict['targets'][
              offset:offset + self._random_crop_length]

        if self._encode_score_fns:
          # Augment the extracted score.
          try:
            augmented_score_sequence = augment_fn(score_sequence)
          except DataAugmentationError:
            Metrics.counter('extract_examples', 'augment_score_failed').inc()
            continue

          # Apply all score encoding functions.
          skip = False
          for name, encode_score_fn in self._encode_score_fns.items():
            example_dict[name] = encode_score_fn(augmented_score_sequence)
            if not example_dict[name]:
              Metrics.counter('extract_examples',
                              'skipped_empty_%s' % name).inc()
              skip = True
              break
          if skip:
            continue

        Metrics.counter('extract_examples', 'encoded_example').inc()
        Metrics.distribution(
            'extract_examples', 'performance_length_in_seconds').update(
                int(augmented_performance_sequence.total_time))

        yield generator_utils.to_example(example_dict)
def generate_examples(input_transform, output_dir, problem_name, splits,
                      min_hop_size_seconds, max_hop_size_seconds,
                      num_replications, min_pitch, max_pitch,
                      encode_performance_fn, encode_score_fns=None,
                      augment_fns=None, absolute_timing=False,
                      random_crop_length=None):
  """Generate data for a Score2Perf problem.

  Args:
    input_transform: The input PTransform object that reads input NoteSequence
        protos, or dictionary mapping split names to such PTransform objects.
        Should produce `(id, NoteSequence)` tuples.
    output_dir: The directory to write the resulting TFRecord file containing
        examples.
    problem_name: Name of the Tensor2Tensor problem, used as a base filename
        for generated data.
    splits: A dictionary of split names and their probabilities. Probabilites
        should add up to 1. If `input_transform` is a dictionary, this argument
        will be ignored.
    min_hop_size_seconds: Minimum hop size in seconds at which input
        NoteSequence protos can be split. Can also be a dictionary mapping split
        name to minimum hop size.
    max_hop_size_seconds: Maximum hop size in seconds at which input
        NoteSequence protos can be split. If zero or None, will not split at
        all. Can also be a dictionary mapping split name to maximum hop size.
    num_replications: Number of times input NoteSequence protos will be
        replicated prior to splitting.
    min_pitch: Minimum MIDI pitch value; notes with lower pitch will be dropped.
    max_pitch: Maximum MIDI pitch value; notes with greater pitch will be
        dropped.
    encode_performance_fn: Required performance encoding function.
    encode_score_fns: Optional dictionary of named score encoding functions.
    augment_fns: Optional list of data augmentation functions. Only applied in
        the 'train' split.
    absolute_timing: If True, each score will use absolute instead of tempo-
        relative timing. Since chord inference depends on having beats, the
        score will only contain melody.
    random_crop_length: If specified, crop each encoded performance to this
        length. Cannot be specified if using scores.

  Raises:
    ValueError: If split probabilities do not add up to 1, or if splits are not
        provided but `input_transform` is not a dictionary.
  """
  # Make sure Beam's log messages are not filtered.
  logging.getLogger().setLevel(logging.INFO)

  if isinstance(input_transform, dict):
    # Input is already partitioned; one transform per split.
    split_names = input_transform.keys()
  else:
    if not splits:
      raise ValueError(
          'Split probabilities must be provided if input is not presplit.')
    split_names, split_probabilities = zip(*splits.items())
    cumulative_splits = list(zip(split_names, np.cumsum(split_probabilities)))
    if cumulative_splits[-1][1] != 1.0:
      raise ValueError('Split probabilities must sum to 1; got %f' %
                       cumulative_splits[-1][1])

  # Check for existence of prior outputs. Since the number of shards may be
  # different, the prior outputs will not necessarily be overwritten and must
  # be deleted explicitly.
  output_filenames = [
      os.path.join(output_dir, '%s-%s.tfrecord' % (problem_name, split_name))
      for split_name in split_names
  ]
  for split_name, output_filename in zip(split_names, output_filenames):
    existing_output_filenames = tf.gfile.Glob(output_filename + '*')
    if existing_output_filenames:
      tf.logging.info(
          'Data files already exist for split %s in problem %s, deleting.',
          split_name, problem_name)
      for filename in existing_output_filenames:
        tf.gfile.Remove(filename)

  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options.split(','))

  with beam.Pipeline(options=pipeline_options) as p:
    if isinstance(input_transform, dict):
      # Input data is already partitioned into splits.
      split_partitions = [
          p | 'input_transform_%s' % split_name >> input_transform[split_name]
          for split_name in split_names
      ]
    else:
      # Read using a single PTransform.
      p |= 'input_transform' >> input_transform
      split_partitions = p | 'partition' >> beam.Partition(
          functools.partial(select_split, cumulative_splits),
          len(cumulative_splits))

    for split_name, output_filename, s in zip(
        split_names, output_filenames, split_partitions):
      # Hop sizes may be global or per-split.
      if isinstance(min_hop_size_seconds, dict):
        min_hop = min_hop_size_seconds[split_name]
      else:
        min_hop = min_hop_size_seconds
      if isinstance(max_hop_size_seconds, dict):
        max_hop = max_hop_size_seconds[split_name]
      else:
        max_hop = max_hop_size_seconds
      # Replication and augmentation are applied to the train split only.
      s |= 'preshuffle_%s' % split_name >> beam.Reshuffle()
      s |= 'filter_invalid_notes_%s' % split_name >> beam.Map(
          functools.partial(filter_invalid_notes, min_pitch, max_pitch))
      s |= 'extract_examples_%s' % split_name >> beam.ParDo(
          ExtractExamplesDoFn(
              min_hop, max_hop,
              num_replications if split_name == 'train' else 1,
              encode_performance_fn, encode_score_fns,
              augment_fns if split_name == 'train' else None,
              absolute_timing,
              random_crop_length))
      s |= 'shuffle_%s' % split_name >> beam.Reshuffle()
      s |= 'write_%s' % split_name >> beam.io.WriteToTFRecord(
          output_filename, coder=beam.coders.ProtoCoder(tf.train.Example))
class ConditionalExtractExamplesDoFn(beam.DoFn):
  """Extracts Score2Perf examples from NoteSequence protos for conditioning."""

  def __init__(self, encode_performance_fn, encode_score_fns,
               augment_fns, num_replications, *unused_args, **unused_kwargs):
    """Initialize a ConditionalExtractExamplesDoFn.

    Performances whose encoding is shorter than 2048 events are discarded by
    `process` (see the length check there).

    Args:
      encode_performance_fn: Performance encoding function. Applied to each
          (augmented) performance NoteSequence; a random 2048-event crop of
          the encoding is stored as both 'inputs' and 'targets' in each
          example.
      encode_score_fns: Optional dictionary of named score encoding functions.
          NOTE(review): stored on the instance but never used by `process` as
          written here — confirm whether score conditioning is still intended.
      augment_fns: Optional list of data augmentation functions. If provided,
          each function is applied to each performance NoteSequence, creating
          a separate example per augmentation function. Functions should not
          modify the NoteSequence. When None, a single identity "augmentation"
          is used.
      num_replications: Number of times each input NoteSequence is processed,
          each time with an independently chosen random crop.
      *unused_args: Forwarded to beam.DoFn.
      **unused_kwargs: Forwarded to beam.DoFn.
    """
    super(ConditionalExtractExamplesDoFn, self).__init__(
        *unused_args, **unused_kwargs)
    self._encode_performance_fn = encode_performance_fn
    self._encode_score_fns = encode_score_fns
    self._num_replications = num_replications
    # Fall back to the identity function so the augmentation loop below
    # always runs at least once per replication.
    self._augment_fns = augment_fns if augment_fns else [lambda ns: ns]

  def process(self, kv):
    """Yields tf.train.Examples extracted from one (key, NoteSequence) pair."""
    # Seed random number generator based on key so that crop offsets are
    # deterministic for a given input.
    # NOTE(review): hashlib.md5 requires bytes under Python 3 — confirm the
    # upstream reader emits bytes keys.
    key, ns_str = kv
    m = hashlib.md5(key)
    random.seed(int(m.hexdigest(), 16))
    # Deserialize NoteSequence proto.
    ns = music_pb2.NoteSequence.FromString(ns_str)
    # Apply sustain pedal (extends note durations while the pedal is down).
    ns = sequences_lib.apply_sustain_control_changes(ns)
    # Remove control changes as there are potentially a lot of them and they
    # are no longer needed after sustain has been applied.
    del ns.control_changes[:]
    for _ in range(self._num_replications):
      for augment_fn in self._augment_fns:
        # Augment and encode the performance; skip (and count) inputs that a
        # particular augmentation cannot handle.
        try:
          augmented_performance_sequence = augment_fn(ns)
        except DataAugmentationError:
          Metrics.counter(
              'extract_examples', 'augment_performance_failed').inc()
          continue
        seq = self._encode_performance_fn(augmented_performance_sequence)
        # Feed the performance in as both input and output for the music
        # transformer, cropping the sequence to length 2048 at a random
        # offset (sequences shorter than 2048 are thrown out).
        if len(seq) >= 2048:
          max_offset = len(seq) - 2048
          offset = random.randrange(max_offset + 1)
          cropped_seq = seq[offset:offset + 2048]
          example_dict = {
              'inputs': cropped_seq,
              'targets': cropped_seq
          }
          yield generator_utils.to_example(example_dict)
def generate_perf_examples(input_transform, output_dir, problem_name, splits,
                           min_pitch, max_pitch,
                           encode_performance_fn, encode_score_fns=None,
                           augment_fns=None, num_replications=None):
  """Generate data for a ConditionalScore2Perf problem.

  Args:
    input_transform: The input PTransform object that reads input NoteSequence
        protos, or dictionary mapping split names to such PTransform objects.
        Should produce `(id, NoteSequence)` tuples.
    output_dir: The directory to write the resulting TFRecord file containing
        examples.
    problem_name: Name of the Tensor2Tensor problem, used as a base filename
        for generated data.
    splits: A dictionary of split names and their probabilities. Probabilities
        should add up to 1. If `input_transform` is a dictionary, this
        argument will be ignored.
    min_pitch: Minimum MIDI pitch value; notes with lower pitch are dropped.
    max_pitch: Maximum MIDI pitch value; notes with greater pitch are dropped.
    encode_performance_fn: Required performance encoding function.
    encode_score_fns: Optional dictionary of named score encoding functions.
    augment_fns: Optional list of data augmentation functions. Only applied in
        the 'train' split.
    num_replications: Number of times input NoteSequence protos will be
        replicated prior to splitting ('train' split only). Defaults to 1.

  Raises:
    ValueError: If split probabilities do not add up to 1, or if splits are
        not provided but `input_transform` is not a dictionary.
  """
  # Make sure Beam's log messages are not filtered.
  logging.getLogger().setLevel(logging.INFO)

  # A None default would crash later in ConditionalExtractExamplesDoFn.process
  # (`range(None)`), so normalize it to a single replication here.
  if num_replications is None:
    num_replications = 1

  if isinstance(input_transform, dict):
    # Input is presplit; materialize the keys so they can be iterated more
    # than once below.
    split_names = list(input_transform.keys())
  else:
    if not splits:
      raise ValueError(
          'Split probabilities must be provided if input is not presplit.')
    split_names, split_probabilities = zip(*splits.items())
    # Wrap in list() so the result is indexable: under Python 3 zip() returns
    # a one-shot iterator and `cumulative_splits[-1]` would raise TypeError.
    cumulative_splits = list(zip(split_names, np.cumsum(split_probabilities)))
    if cumulative_splits[-1][1] != 1.0:
      raise ValueError('Split probabilities must sum to 1; got %f' %
                       cumulative_splits[-1][1])

  # Check for existence of prior outputs. Since the number of shards may be
  # different, the prior outputs will not necessarily be overwritten and must
  # be deleted explicitly.
  output_filenames = [
      os.path.join(output_dir, '%s-%s.tfrecord' % (problem_name, split_name))
      for split_name in split_names
  ]
  for split_name, output_filename in zip(split_names, output_filenames):
    existing_output_filenames = tf.gfile.Glob(output_filename + '*')
    if existing_output_filenames:
      tf.logging.info(
          'Data files already exist for split %s in problem %s, deleting.',
          split_name, problem_name)
      for filename in existing_output_filenames:
        tf.gfile.Remove(filename)

  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options.split(','))
  with beam.Pipeline(options=pipeline_options) as p:
    if isinstance(input_transform, dict):
      # Input data is already partitioned into splits.
      split_partitions = [
          p | 'input_transform_%s' % split_name >> input_transform[split_name]
          for split_name in split_names
      ]
    else:
      # Read using a single PTransform, then partition by cumulative split
      # probability.
      p |= 'input_transform' >> input_transform
      split_partitions = p | 'partition' >> beam.Partition(
          functools.partial(select_split, cumulative_splits),
          len(cumulative_splits))

    for split_name, output_filename, s in zip(
        split_names, output_filenames, split_partitions):
      # Augmentation and replication only apply to training data.
      s |= 'preshuffle_%s' % split_name >> beam.Reshuffle()
      s |= 'filter_invalid_notes_%s' % split_name >> beam.Map(
          functools.partial(filter_invalid_notes, min_pitch, max_pitch))
      s |= 'extract_examples_%s' % split_name >> beam.ParDo(
          ConditionalExtractExamplesDoFn(
              encode_performance_fn, encode_score_fns,
              augment_fns if split_name == 'train' else None,
              num_replications if split_name == 'train' else 1))
      s |= 'shuffle_%s' % split_name >> beam.Reshuffle()
      s |= 'write_%s' % split_name >> beam.io.WriteToTFRecord(
          output_filename, coder=beam.coders.ProtoCoder(tf.train.Example))
| 43.011129 | 80 | 0.689843 |
b72abe75e3722a3483ad2f4e12d86cf8fd2a32d6 | 4,090 | py | Python | main.py | TaigaOsguthorpe/PySide2-Multiple-Windows | 2d2574288f290f50fcf7f908103ebe1d525395c9 | [
"CC0-1.0"
] | null | null | null | main.py | TaigaOsguthorpe/PySide2-Multiple-Windows | 2d2574288f290f50fcf7f908103ebe1d525395c9 | [
"CC0-1.0"
] | null | null | null | main.py | TaigaOsguthorpe/PySide2-Multiple-Windows | 2d2574288f290f50fcf7f908103ebe1d525395c9 | [
"CC0-1.0"
] | null | null | null | import sys
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import QFile
from PySide2 import QtCore
from PySide2.QtGui import QPixmap
from ui_files.main_gui import Ui_MainWindow
from ui_files.image_window_gui import Ui_MainWindow as Ui_ImageWindow
from pathlib import Path
class MainWindow(QMainWindow):
    """Main application window; owns and tracks spawned ImageWindow children."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # When the "new_window_button" is clicked fire off "new_image_window".
        self.ui.new_window_button.clicked.connect(self.new_image_window)
        # Bookkeeping entries of the form {"mem_adress": window, "del": bool}.
        # Holding a reference keeps each child window alive; entries flagged
        # "del" (set by ImageWindow.closeEvent) are purged by clean().
        self.window_list = []

    def clean(self):
        """Drop bookkeeping entries for windows that were flagged as closed.

        Fix: the old code called list.remove() while iterating the same list,
        which skips the element following each removal, so some closed
        windows were never cleaned up. The list is now rebuilt instead.
        """
        print("clean: START")
        cleaned = 0
        survivors = []
        for item in self.window_list:
            if item["del"]:
                print("FOUND TRUE!")
                cleaned = cleaned + 1
            else:
                survivors.append(item)
        self.window_list = survivors
        print("clean: cleaned {0} windows".format(cleaned))
        print("clean: END")

    def new_image_window(self):
        """Create, register and show a new ImageWindow."""
        print("new_image_window: START")
        # Purge entries for windows that have already been closed, freeing
        # the small amount of memory their bookkeeping entries use.
        self.clean()
        # Create the new window itself via the ImageWindow class.
        w = ImageWindow(self)
        # Keep the window referenced so that garbage collection does not snap
        # it up and destroy it while it is still on screen.
        self.window_list.append({"mem_adress": w, "del": False})
        # Render the new window on screen.
        w.show()
        print("new_image_window: END")

    def closeEvent(self, event):
        # Quit the whole application when the main window closes, forcing all
        # other windows to close with it and freeing their memory. Removing
        # this handler would let child windows outlive the main window.
        app.quit()
class ImageWindow(QMainWindow):
    """A secondary, semi-independent window that displays a single image.

    Instances are created by MainWindow.new_image_window(), which also keeps
    a reference to them so garbage collection cannot destroy a window while
    it is visible.
    """

    def __init__(self, parent):
        super(ImageWindow, self).__init__()
        # Release this window's memory as soon as it is closed, even though
        # the application (and the main window) keeps running.
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.ui = Ui_ImageWindow()
        self.ui.setupUi(self)
        # Keep a handle on the creator so closeEvent() can flag our entry in
        # its window_list for cleanup.
        self.parent = parent
        # Load the demo image and scale it to fit, preserving proportions.
        image_path = "{0}/a.tif".format(Path(__file__).parent.absolute())
        scaled = QPixmap(image_path).scaled(1024, 1024,
                                            QtCore.Qt.KeepAspectRatio)
        self.ui.image_label.setPixmap(scaled)

    def closeEvent(self, event):
        """Mark this window's bookkeeping entry as deletable, then close."""
        print("closEvent detected")
        print(self.parent.window_list)
        for entry in self.parent.window_list:
            print(entry)
            print(entry["mem_adress"])
            print(self)
            if entry["mem_adress"] == self:
                print("FOUND SELF!")
                entry["del"] = True
        event.accept()
        print("closEvent accepted")
if __name__ == "__main__":
    # Run the application.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # Start the Qt event loop; exit with its return code when it finishes.
    sys.exit(app.exec_())
| 36.19469 | 156 | 0.647433 |
09ef6f7a073082dc8065ba5663bc548a90966222 | 3,659 | py | Python | modules/rnn.py | JZX555/FineGrainedDomainAdaption | 3873a7af9475d4fd3c1cc3c676ad3153fc3ce698 | [
"Apache-2.0"
] | null | null | null | modules/rnn.py | JZX555/FineGrainedDomainAdaption | 3873a7af9475d4fd3c1cc3c676ad3153fc3ce698 | [
"Apache-2.0"
] | 2 | 2021-11-12T08:01:06.000Z | 2021-11-13T08:18:18.000Z | modules/rnn.py | JZX555/FineGrainedDomainAdaption | 3873a7af9475d4fd3c1cc3c676ad3153fc3ce698 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import init, nest
def sort_batch(seq_len):
    """Sort a 1-D tensor of sequence lengths in decreasing order.

    Returns a tuple (restore_idx, sorted_idx, sorted_lengths):
    ``sorted_idx`` reorders the batch by decreasing length, and
    ``restore_idx`` undoes that reordering; ``sorted_lengths`` is a plain
    Python list of the sorted lengths.
    """
    with torch.no_grad():
        sorted_lengths, sorted_idx = seq_len.sort(descending=True)
        restore_idx = sorted_idx.sort()[1]
    return restore_idx, sorted_idx, sorted_lengths.tolist()
class RNN(nn.Module):
    """
    The RNN unit which is used to build encoder and decoder.

    Wraps nn.LSTM / nn.GRU and handles masked, variable-length batches via
    pack_padded_sequence / pad_packed_sequence, restoring the original batch
    order before returning.
    """

    def __init__(self, type='LSTM', batch_first=True, **kwargs):
        # `type` selects the underlying recurrent cell; remaining kwargs
        # (input_size, hidden_size, ...) are forwarded to the torch module.
        super(RNN, self).__init__()
        self.type = type
        self.batch_first = batch_first
        if self.type == 'LSTM':
            self.rnn = nn.LSTM(batch_first=self.batch_first, **kwargs)
        elif self.type == 'GRU':
            self.rnn = nn.GRU(batch_first=self.batch_first, **kwargs)
        else:
            raise NameError(
                'Type {} is not define, please use LSTM or GRU.'.format(
                    self.type))
        self._reset_parameters()

    def _reset_parameters(self):
        # Re-initialize every weight/bias with the project's RNN init scheme.
        for weight in self.rnn.parameters():
            init.rnn_init(weight)

    def forward(self, input, input_mask, h_0=None, bert=False):
        """
        :param input: Input sequence.
            With shape [batch_size, input_len, dim] if batch_first is True.
        :param input_mask: Mask of sequence. Per the length computation
            below, 1 marks padding and 0 marks a real token.
        :param h_0: Optional initial hidden state, with the batch dimension
            on dim 1 (as nn.LSTM/nn.GRU expect).
        :param bert: If True, add 1 to every computed length.
            NOTE(review): presumably compensates for a special prefix token
            excluded from the mask — confirm against callers.
        """
        self.rnn.flatten_parameters(
        )  # This is necessary if want to use DataParallel
        # Convert into batch first
        if self.batch_first is False:
            input = input.transpose(0, 1).contiguous()
            input_mask = input_mask.transpose(0, 1).contiguous()
        ##########################
        # Pad zero length with 1 #
        ##########################
        with torch.no_grad():
            add = 0
            if bert is True:
                add = 1
            seq_len = (1 - input_mask.long()).sum(1) + add  # [batch_size, ]
            # pack_padded_sequence rejects zero lengths, so clamp them to 1.
            seq_len[seq_len.eq(0)] = 1
        out, h_n = self._forward_rnn(input, seq_len, h_0=h_0)
        if self.batch_first is False:
            out = out.transpose(0, 1).contiguous()  # Convert to batch_second
        return out, h_n

    def _forward_rnn(self, input, input_length, h_0=None):
        """
        :param input: Input sequence.
            FloatTensor with shape [batch_size, input_len, dim]
        :param input_length: Length of each sequence.
            LongTensor with shape [batch_size, ]
        """
        total_length = input.size(1)
        # 1. Sort the batch by decreasing length (required by
        #    pack_padded_sequence) and reorder h_0 to match (batch on dim 1).
        oidx, sidx, slens = sort_batch(input_length)
        input_sorted = torch.index_select(input, index=sidx, dim=0)
        if h_0 is not None:
            h_0_sorted = nest.map_structure(
                lambda t: torch.index_select(t, 1, sidx), h_0)
        else:
            h_0_sorted = None
        # 2. RNN compute on the packed (padding-free) representation.
        input_packed = pack_padded_sequence(input_sorted,
                                            slens,
                                            batch_first=True)
        out_packed, h_n_sorted = self.rnn(input_packed, h_0_sorted)
        # 3. Unpack (padding back to total_length so the time dimension
        #    matches the input) and restore the original batch order.
        out_sorted = pad_packed_sequence(out_packed,
                                         batch_first=True,
                                         total_length=total_length)[0]
        out = torch.index_select(out_sorted, dim=0, index=oidx)
        h_n_sorted = nest.map_structure(
            lambda t: torch.index_select(t, 1, oidx), h_n_sorted)
        return out.contiguous(), h_n_sorted
if __name__ == '__main__':
    # Smoke test: constructing the module exercises cell creation and
    # parameter initialization.
    RNN(type='LSTM', input_size=50, hidden_size=100)
| 31.543103 | 77 | 0.565455 |
99a2df053f6f4b192d7d85ef7b8b59046e3c19ea | 1,023 | py | Python | tests/kubernetes/checks/test_ApiServerAuditLog.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 5 | 2021-07-29T18:08:40.000Z | 2022-03-21T04:39:32.000Z | tests/kubernetes/checks/test_ApiServerAuditLog.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 16 | 2021-03-09T07:38:38.000Z | 2021-06-09T03:53:55.000Z | tests/kubernetes/checks/test_ApiServerAuditLog.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 2 | 2021-08-23T13:25:36.000Z | 2021-11-05T21:44:52.000Z | import os
import unittest
from checkov.kubernetes.checks.ApiServerAuditLog import check
from checkov.kubernetes.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestApiServerProfiling(unittest.TestCase):

    def test_summary(self):
        """Runs the ApiServerAuditLog check against the example manifests."""
        current_dir = os.path.dirname(os.path.realpath(__file__))
        scan_root = current_dir + "/example_ApiServerAuditLog"
        scan_report = Runner().run(
            root_folder=scan_root,
            runner_filter=RunnerFilter(checks=[check.id]))
        summary = scan_report.get_summary()
        # One manifest is expected to pass and one to fail; nothing should be
        # skipped or unparseable.
        self.assertEqual(summary['passed'], 1)
        self.assertEqual(summary['failed'], 1)
        self.assertEqual(summary['skipped'], 0)
        self.assertEqual(summary['parsing_errors'], 0)
        for record in scan_report.failed_checks:
            self.assertTrue("should-fail" in record.resource)
        for record in scan_report.passed_checks:
            self.assertTrue("should-pass" in record.resource)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 31.96875 | 101 | 0.7087 |
9014e9069133afbc4d79b03f3ee353b50458535e | 7,599 | py | Python | bokeh/charts/utils.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2021-11-07T18:55:59.000Z | 2021-11-07T18:55:59.000Z | bokeh/charts/utils.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | null | null | null | bokeh/charts/utils.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2021-08-01T08:38:53.000Z | 2021-08-01T08:38:53.000Z | """ This is the utils module that collects convenience functions and code that are
useful for charts ecosystem.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import itertools
from math import cos, sin
from ..browserlib import view
from ..document import Document
from ..embed import file_html
from ..models import GlyphRenderer
from ..models.glyphs import (
Asterisk, Circle, CircleCross, CircleX, Cross, Diamond, DiamondCross,
InvertedTriangle, Square, SquareCross, SquareX, Triangle, X)
from ..resources import INLINE
from ..session import Session
from ..utils import publish_display_data
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# TODO: (bev) this should go in a plotting utils one level up
_default_cycle_palette = [
    "#f22c40", "#5ab738", "#407ee7", "#df5320", "#00ad9c", "#c33ff3"
]


def cycle_colors(chunk, palette=_default_cycle_palette):
    """ Build a color list just cycling through a given palette.

    Args:
        chunk (seq): the chunk of elements to generate the color list for;
            only its length is used
        palette (seq[color]) : a palette of colors to cycle through

    Returns:
        list[color]: one color per element of ``chunk``, repeating the
        palette as often as needed

    """
    # islice(cycle(...), n) yields exactly len(chunk) colors, replacing the
    # manual next()/append loop.
    return list(itertools.islice(itertools.cycle(palette), len(chunk)))
# TODO: (bev) this should go in a plotting utils one level up
def make_scatter(source, x, y, markertype, color, line_color=None,
                 size=10, fill_alpha=0.2, line_alpha=1.0):
    """Create a marker glyph and wrap it in a renderer.

    Args:
        source (obj): datasource object containing markers references.
        x (str or list[float]) : values or field names of line ``x`` coordinates
        y (str or list[float]) : values or field names of line ``y`` coordinates
        markertype (int or str): Marker type to use (e.g., 2, 'circle', etc.).
            An integer n selects the n-th marker (1-based), wrapping around
            the marker table.
        color (str): color of the points
        line_color (str, optional): color of the marker outline; defaults to
            ``color``
        size (int) : size of the scatter marker
        fill_alpha(float) : alpha value of the fill color
        line_alpha(float) : alpha value of the line color

    Return:
        GlyphRenderer: renderer wrapping the marker glyph instance
    """
    if line_color is None:
        line_color = color

    _marker_types = OrderedDict(
        [
            ("circle", Circle),
            ("square", Square),
            ("triangle", Triangle),
            ("diamond", Diamond),
            ("inverted_triangle", InvertedTriangle),
            ("asterisk", Asterisk),
            ("cross", Cross),
            ("x", X),
            ("circle_cross", CircleCross),
            ("circle_x", CircleX),
            ("square_x", SquareX),
            ("square_cross", SquareCross),
            ("diamond_cross", DiamondCross),
        ]
    )

    if isinstance(markertype, int):
        # Fix: the old next()-loop left `shape` unbound for markertype <= 0,
        # raising NameError. Non-positive values now fall back to the first
        # marker; positive values keep the old 1-based, wrapping mapping.
        names = list(_marker_types)
        if markertype >= 1:
            shape = names[(markertype - 1) % len(names)]
        else:
            shape = names[0]
    else:
        shape = markertype

    glyph = _marker_types[shape](
        x=x, y=y, size=size, fill_color=color, fill_alpha=fill_alpha,
        line_color=line_color, line_alpha=line_alpha
    )

    return GlyphRenderer(data_source=source, glyph=glyph)
def chunk(l, n):
    """Yield successive n-sized chunks from l.

    Args:
        l (seq): the incoming sequence to be chunked
        n (int): length of each chunk (the final chunk may be shorter)

    """
    for start in range(0, len(l), n):
        yield l[start:start + n]
def polar_to_cartesian(r, start_angles, end_angles):
    """Translate polar coordinates to cartesian.

    Each output point lies at radius ``r`` on the bisector of the matching
    (start, end) angle pair.

    Args:
        r (float): radial coordinate
        start_angles (list(float)): list of start angles
        end_angles (list(float)): list of end angles

    Returns:
        x, y points
    """
    points = [
        (r * cos((start + end) / 2), r * sin((start + end) / 2))
        for start, end in zip(start_angles, end_angles)
    ]
    return zip(*points)
# TODO: Experimental implementation. This should really be a shared
# pattern between plotting/charts and other bokeh interfaces.
# This will probably be part of the future charts re-design
# to make them inherit from plot (or at least be closer to).
# In this way both charts and plotting could share figure,
# show, save, push methods as well as VBox, etc...
class Figure(object):
    """Aggregates one or more chart objects into a single document/output."""

    def __init__(self, *charts, **kwargs):
        # Output destinations; any combination may be active at once.
        self.filename = kwargs.pop('filename', None)
        self.server = kwargs.pop('server', None)
        self.notebook = kwargs.pop('notebook', None)
        self.title = kwargs.pop('title', '')
        self.children = kwargs.pop('children', None)
        self.charts = charts
        self.doc = Document()
        self.doc.hold(True)
        self._plots = []
        if self.server:
            # Connect to the plot server and bind this document to it.
            self.session = Session()
            self.session.use_doc(self.server)
            self.session.load_document(self.doc)
        if self.children:
            # Import here to avoid a module-level dependency when unused.
            from bokeh.models import VBox
            self.doc.add(VBox(children=self.children))
        self.plot = None
        for i, chart in enumerate(self.charts):
            chart.doc = self.doc
            if self.server:
                chart.session = self.session
            # Force the chart to create the underlying plot
            chart._setup_show()
            chart._prepare_show()
            chart._show_teardown()
            # Fall back to the first chart's title if none was given.
            if not self.title:
                self.title = chart.chart.title
            self._plots += chart.chart._plots
        # reset the plot title with the one set for the Figure
        self.doc._current_plot.title = self.title

    def show(self):
        """Main show function.

        It shows the Figure in file, server and notebook outputs.
        """
        show(self, self.title, self.filename, self.server, self.notebook)
def show(obj, title='test', filename=False, server=False, notebook=False, **kws):
    """ 'shows' a plot object, by auto-raising the window or tab
    displaying the current plot (for file/server output modes) or displaying
    it in an output cell (IPython notebook).

    Args:
        obj (Widget/Plot object, optional): it accepts a plot object and just
            shows it.
        title (str, optional): page title used when writing file output.
        filename (str or bool, optional): HTML file to write; True selects
            the default name "untitled".
        server (str or bool, optional): if truthy, store and view the
            document on the plot server.
        notebook (bool, optional): if True, publish each plot to the IPython
            notebook.
        **kws: currently ignored; accepted for call-site compatibility.
    """
    if filename:
        # `filename is True` means "use the default name". (The old
        # `else: filename = filename` branch was a no-op and was removed.)
        if filename is True:
            filename = "untitled"
        with open(filename, "w") as f:
            f.write(file_html(obj.doc, INLINE, title))
        print("Wrote %s" % filename)
        view(filename)
    elif filename is False and server is False and notebook is False:
        print("You have to provide a filename (filename='foo.html' or"
              " .filename('foo.html')) to save your plot.")

    if server:
        obj.session.store_document(obj.doc)
        link = obj.session.object_link(obj.doc.context)
        view(link)

    if notebook:
        # Import here to avoid a hard IPython dependency for non-notebook use.
        from bokeh.embed import notebook_div
        for plot in obj._plots:
            publish_display_data({'text/html': notebook_div(plot)})
| 33.475771 | 87 | 0.588893 |
4efe4cfd75bb872c29f4bdaf5748efa9414f11a9 | 1,996 | py | Python | storage/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | storage/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | storage/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
models.py
Created by Darcy Liu on 2012-04-02.
Copyright (c) 2012 Close To U. All rights reserved.
"""
from django.db import models
from django.contrib.auth.models import User
class Storage(models.Model):
key = models.AutoField(primary_key=True)
storage = models.CharField(max_length=64,verbose_name='name')
bucket = models.CharField(max_length=64,verbose_name='bucket')
path = models.CharField(max_length=256,verbose_name='path')
name = models.CharField(max_length=256,verbose_name='name')
mime = models.CharField(max_length=64,verbose_name='mime')
size = models.IntegerField(default=0,verbose_name='size')
md5 = models.CharField(max_length=32,verbose_name='md5')
MEDIA_CHOICES = (
('image', 'Image'),
('video', 'Video'),
('audio', 'Audio'),
('unknown', 'Unknown'),
)
kind = models.CharField(max_length=16,choices=MEDIA_CHOICES,default='unknown')
public = models.BooleanField(default=False,verbose_name='public')
author = models.ForeignKey(User,verbose_name='author')
created = models.DateTimeField(auto_now_add=True,verbose_name='created')
updated = models.DateTimeField(auto_now=True,verbose_name='updated')
def __unicode__(self):
result = self.name + self.path
return unicode(result)
# class Media(models.Model):
# key = models.AutoField(primary_key=True)
# kind = models.CharField(max_length=64,verbose_name='kind')
# pv = models.IntegerField(default=0,verbose_name='pv')
# comments = models.IntegerField(default=0,verbose_name='comments')
# entity = models.ForeignKey(Storage,verbose_name='storage')
# author = models.ForeignKey(User,verbose_name='author')
# created = models.DateTimeField(auto_now_add=True,verbose_name='created')
# updated = models.DateTimeField(auto_now=True,verbose_name='updated')
# def __unicode__(self):
# result = self.kind + '/'
# return unicode(result) | 41.583333 | 82 | 0.705411 |
58f732764c1a1fbb05be04f75f9ea606993a7cdb | 8,574 | py | Python | tools/clang/scripts/test_tool.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/clang/scripts/test_tool.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/clang/scripts/test_tool.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test harness for chromium clang tools."""
import argparse
import difflib
import glob
import json
import os
import os.path
import shutil
import subprocess
import sys
def _RunGit(args):
  """Invokes git with the given argument list, raising on a non-zero exit.

  On Windows the wrapper batch file must be used instead of the bare binary.
  """
  executable = 'git.bat' if sys.platform == 'win32' else 'git'
  subprocess.check_call([executable] + args)
def _GenerateCompileCommands(files, include_paths):
  """Returns a JSON string containing a compilation database for the input."""
  # Note: in theory, backslashes in the compile DB should work but the tools
  # that write compile DBs and the tools that read them don't agree on the
  # escaping convention: https://llvm.org/bugs/show_bug.cgi?id=19687
  normalized = [path.replace('\\', '/') for path in files]
  include_path_flags = ' '.join(
      '-I %s' % include_path.replace('\\', '/')
      for include_path in include_paths)
  entries = []
  for path in normalized:
    entries.append({
        'directory': os.path.dirname(path),
        'command': 'clang++ -std=c++14 -fsyntax-only %s -c %s' % (
            include_path_flags, os.path.basename(path)),
        'file': os.path.basename(path),
    })
  return json.dumps(entries, indent=2)
def _NumberOfTestsToString(tests):
  """Returns an English phrase describing the number of tests."""
  suffix = '' if tests == 1 else 's'
  return '%d test%s' % (tests, suffix)
def _ApplyTool(tools_clang_scripts_directory,
               tool_to_test,
               tool_path,
               tool_args,
               test_directory_for_tool,
               actual_files,
               apply_edits):
  """Runs the clang tool over `actual_files`, optionally applying its edits.

  Args:
    tools_clang_scripts_directory: Directory containing run_tool.py et al.
    tool_to_test: Name of the clang tool.
    tool_path: Optional path to the tool binary directory.
    tool_args: Optional list of extra arguments forwarded to the tool.
    test_directory_for_tool: Directory holding the tool's test files.
    actual_files: Working copies of the test inputs to run the tool on.
    apply_edits: If True, pipe the tool output through extract_edits.py and
        apply_edits.py and reformat the results; otherwise write the raw tool
        output to actual_files[0].

  Returns:
    0 on success, or the exit code of the first failing pipeline stage.
  """
  try:
    # Stage the test files in the git index. If they aren't staged, then
    # run_tool.py will skip them when applying replacements.
    args = ['add']
    args.extend(actual_files)
    _RunGit(args)

    # Launch the following pipeline if |apply_edits| is True:
    #     run_tool.py ... | extract_edits.py | apply_edits.py ...
    # Otherwise just the first step is done and the result is written to
    # actual_files[0].
    processes = []
    args = ['python',
            os.path.join(tools_clang_scripts_directory, 'run_tool.py')]
    extra_run_tool_args_path = os.path.join(test_directory_for_tool,
                                            'run_tool.args')
    if os.path.exists(extra_run_tool_args_path):
      with open(extra_run_tool_args_path, 'r') as extra_run_tool_args_file:
        extra_run_tool_args = extra_run_tool_args_file.readlines()
        args.extend([arg.strip() for arg in extra_run_tool_args])
    args.extend(['--tool', tool_to_test, '-p', test_directory_for_tool])

    if tool_path:
      args.extend(['--tool-path', tool_path])
    if tool_args:
      for arg in tool_args:
        args.append('--tool-arg=%s' % arg)

    args.extend(actual_files)
    processes.append(subprocess.Popen(args, stdout=subprocess.PIPE))

    if apply_edits:
      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'extract_edits.py')
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))
      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'apply_edits.py'), '-p',
          test_directory_for_tool
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))

    # Wait for the pipeline to finish running + check exit codes.
    stdout, _ = processes[-1].communicate()
    for process in processes:
      process.wait()
      if process.returncode != 0:
        # print() is parenthesized so the script parses under both Python 2
        # and Python 3; the bare `print x` statement form is Python 2 only.
        print('Failure while running the tool.')
        return process.returncode

    if apply_edits:
      # Reformat the resulting edits via: git cl format.
      args = ['cl', 'format']
      args.extend(actual_files)
      _RunGit(args)
    else:
      # NOTE(review): `stdout` is bytes under Python 3 while the file is
      # opened in text mode — full Python 3 support would need decoding here.
      with open(actual_files[0], 'w') as output_file:
        output_file.write(stdout)

    return 0
  finally:
    # No matter what, unstage the git changes we made earlier to avoid
    # polluting the index.
    args = ['reset', '--quiet', 'HEAD']
    args.extend(actual_files)
    _RunGit(args)
def main(argv):
  """Parses arguments, runs the tool over its test files, and diffs results.

  Returns 1 when any test fails or the environment is invalid; returns the
  tool's exit code when the tool itself fails; returns None (exit status 0)
  on success.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--apply-edits',
      action='store_true',
      help='Applies the edits to the original test files and compares the '
      'reformatted new files with the expected files.')
  parser.add_argument(
      '--tool-arg', nargs='?', action='append',
      help='optional arguments passed to the tool')
  parser.add_argument(
      '--tool-path', nargs='?',
      help='optional path to the tool directory')
  parser.add_argument('tool_name',
                      nargs=1,
                      help='Clang tool to be tested.')
  args = parser.parse_args(argv)

  tool_to_test = args.tool_name[0]
  # All print() calls below are parenthesized so the script parses under both
  # Python 2 and Python 3; the bare `print x` statement is Python 2 only.
  print('\nTesting %s\n' % tool_to_test)

  tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
  tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
  test_directory_for_tool = os.path.join(
      tools_clang_directory, tool_to_test, 'tests')
  compile_database = os.path.join(test_directory_for_tool,
                                  'compile_commands.json')
  source_files = glob.glob(os.path.join(test_directory_for_tool,
                                        '*-original.cc'))
  # Expected files are .cc when edits are applied, .txt (raw tool output)
  # otherwise.
  ext = 'cc' if args.apply_edits else 'txt'
  actual_files = ['-'.join([source_file.rsplit('-', 1)[0], 'actual.cc'])
                  for source_file in source_files]
  expected_files = ['-'.join([source_file.rsplit('-', 1)[0], 'expected.' + ext])
                    for source_file in source_files]
  if not args.apply_edits and len(actual_files) != 1:
    print('Only one test file is expected for testing without apply-edits.')
    return 1

  include_paths = []
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory, '../..')))
  # Many gtest and gmock headers expect to have testing/gtest/include and/or
  # testing/gmock/include in the include search path.
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gtest/include')))
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gmock/include')))

  if len(actual_files) == 0:
    print('Tool "%s" does not have compatible test files.' % tool_to_test)
    return 1

  # Set up the test environment.
  for source, actual in zip(source_files, actual_files):
    shutil.copyfile(source, actual)

  # Generate a temporary compilation database to run the tool over.
  with open(compile_database, 'w') as f:
    f.write(_GenerateCompileCommands(actual_files, include_paths))

  # Run the tool.
  os.chdir(test_directory_for_tool)
  exitcode = _ApplyTool(tools_clang_scripts_directory, tool_to_test,
                        args.tool_path, args.tool_arg,
                        test_directory_for_tool, actual_files,
                        args.apply_edits)
  if exitcode != 0:
    return exitcode

  # Compare actual-vs-expected results.
  passed = 0
  failed = 0
  for expected, actual in zip(expected_files, actual_files):
    print('[ RUN ] %s' % os.path.relpath(actual))
    expected_output = actual_output = None
    with open(expected, 'r') as f:
      expected_output = f.read().splitlines()
    with open(actual, 'r') as f:
      actual_output = f.read().splitlines()
    if actual_output != expected_output:
      failed += 1
      for line in difflib.unified_diff(expected_output, actual_output,
                                       fromfile=os.path.relpath(expected),
                                       tofile=os.path.relpath(actual)):
        sys.stdout.write(line)
      print('[ FAILED ] %s' % os.path.relpath(actual))
      # Don't clean up the file on failure, so the results can be referenced
      # more easily.
      continue
    print('[ OK ] %s' % os.path.relpath(actual))
    passed += 1
    os.remove(actual)

  if failed == 0:
    os.remove(compile_database)

  print('[==========] %s ran.' % _NumberOfTestsToString(len(source_files)))
  if passed > 0:
    print('[ PASSED ] %s.' % _NumberOfTestsToString(passed))
  if failed > 0:
    print('[ FAILED ] %s.' % _NumberOfTestsToString(failed))
    return 1
if __name__ == '__main__':
  # Forward the result of main() as the process exit code (None maps to 0).
  sys.exit(main(sys.argv[1:]))
| 36.641026 | 80 | 0.630394 |
8be1268f37effd125a92af9cff1c78332fa26064 | 2,073 | py | Python | day02/4-post.py | Mhh123/spider | fe4410f9f3b4a9c0a5dac51bd93a1434aaa4f888 | [
"Apache-2.0"
] | null | null | null | day02/4-post.py | Mhh123/spider | fe4410f9f3b4a9c0a5dac51bd93a1434aaa4f888 | [
"Apache-2.0"
] | null | null | null | day02/4-post.py | Mhh123/spider | fe4410f9f3b4a9c0a5dac51bd93a1434aaa4f888 | [
"Apache-2.0"
] | null | null | null | import urllib.request
import urllib.parse
# Baidu Fanyi (translate) realtime-translation endpoint.
post_url = 'http://fanyi.baidu.com/v2transapi'
# Form payload: translate the word "baby" from English to Chinese.
# NOTE(review): 'sign' and 'token' are anti-scraping values generated per
# session by the page's JavaScript; these hard-coded ones are presumably
# stale, so the server may reject the request — confirm before relying on it.
data = {
    'from': 'en',
    'to': 'zh',
    'query': 'baby',
    'transtype': 'realtime',
    'simple_means_flag': '3',
    'sign': '814534.560887',
    'token': '7518ccf005e3eeba364707e08b4e1140',
}
# Process data: urlencode the form fields, then encode to the utf-8 bytes
# that urlopen expects for a POST body.
data = urllib.parse.urlencode(data).encode('utf-8')
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;Win64; x64) AppleWebkit/537.36 ('
#                   'KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
# }
# Request headers captured from a real browser session; the commented-out
# entries were not needed for the request to succeed.
headers = {
    # 'Accept': '*/*',
    # 'Accept-Encoding': 'gzip, deflate',
    # 'Accept-Language': 'zh-CN,zh;q=0.9',
    # 'Connection': 'keep-alive',
    # 'Content-Length': '121',
    # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Cookie': 'BIDUPSID=F90666F891F9ABFE46063DF4FADA3DB8; BAIDUID=5E61BD0A9ED524DED47ECEC1B349A591:FG=1; PSTM=1532328315; pgv_pvi=5610460160; PSINO=2; BDRCVFR[auK81cz0o7_]=mk3SLVN4HKm; H_PS_PSSID=1450_26908_21097_20719; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; locale=zh; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1532397315; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1532397315; to_lang_often=%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; from_lang_often=%5B%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D',
    'Host': 'fanyi.baidu.com',
    'Origin': 'http://fanyi.baidu.com',
    'Referer': 'http://fanyi.baidu.com/?aldtype=16047',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}
request = urllib.request.Request(url=post_url, headers=headers)
# Passing `data` makes this an HTTP POST.
response = urllib.request.urlopen(request, data)
print(response.read().decode('utf8'))
| 51.825 | 784 | 0.693681 |
16f21a5b193c8b9d79a1801505d260ca35da3824 | 5,743 | py | Python | tsut/tests/test_delete_users_and_groups.py | MishaThoughtSpot/user_tools | cf32a067af007c002fb62b5c26ee4a2afd6cccaf | [
"MIT"
] | 3 | 2020-04-28T15:43:03.000Z | 2021-01-07T23:04:28.000Z | tsut/tests/test_delete_users_and_groups.py | MishaThoughtSpot/user_tools | cf32a067af007c002fb62b5c26ee4a2afd6cccaf | [
"MIT"
] | 3 | 2019-10-21T16:03:19.000Z | 2021-01-21T01:26:40.000Z | tsut/tests/test_delete_users_and_groups.py | MishaThoughtSpot/user_tools | cf32a067af007c002fb62b5c26ee4a2afd6cccaf | [
"MIT"
] | 4 | 2019-09-30T19:49:32.000Z | 2021-04-22T18:49:17.000Z | import unittest
from tsut.api import SyncUserAndGroups
from tsut.model import UsersAndGroups, Group, User, Visibility
"""
Copyright 2018 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
TS_URL = "https://tstest"  # Test TS instance.
TS_USER = "tsadmin"  # Admin account used for the API sync calls.
TS_PASSWORD = "admin"  # NOTE(review): test-only credentials; do not reuse.
class TestDeleteUsersAndGroups(unittest.TestCase):
    """Tests deleting users and groups."""

    # NOTE(review): these are integration tests — every call syncs against a
    # live ThoughtSpot instance at TS_URL using the TS_USER/TS_PASSWORD
    # account, so they require that server to be reachable.

    @staticmethod
    def create_common_users_and_groups():
        """
        Creates a set of users and groups that can be used in multiple tests.
        """
        auag = UsersAndGroups()

        auag.add_group(
            Group(
                name="Group1",
                display_name="This is Group 1",
                description="A group for testing.",
                group_names=[],
                visibility=Visibility.DEFAULT,
            )
        )
        auag.add_group(
            Group(
                name="Group2",
                display_name="This is Group 2",
                description="Another group for testing.",
                group_names=["Group1"],
                visibility=Visibility.NON_SHAREABLE,
            )
        )
        # Testing for ability to handle embedded quotes.
        auag.add_group(
            Group(
                name='Group"3"',
                display_name='This is Group "3"',
                description='Another "group" for testing.',
                group_names=["Group1"],
                visibility=Visibility.NON_SHAREABLE,
            )
        )

        auag.add_user(
            User(
                name="User1",
                password="pwd1",
                display_name="User 1",
                mail="User1@company.com",
                group_names=["Group1"],
            )
        )
        auag.add_user(
            User(
                name="User2",
                password="pwd2",
                display_name="User 2",
                mail="User2@company.com",
                group_names=["Group1", "Group2"],
                visibility=Visibility.NON_SHAREABLE,
            )
        )

        # User with embedded quotes, member of the quoted group above.
        auag.add_user(
            User(
                name='User"3"',
                password="pwd2",
                display_name='User "3"',
                mail="User2@company.com",
                group_names=['Group"3"'],
            )
        )

        print(auag)

        sync = SyncUserAndGroups(
            tsurl=TS_URL,
            username=TS_USER,
            password=TS_PASSWORD,
            disable_ssl=True,
        )
        sync.sync_users_and_groups(auag)

    def test_delete_user_list(self):
        """Tests deleting of a list of users."""
        self.create_common_users_and_groups()

        sync = SyncUserAndGroups(
            tsurl=TS_URL,
            username=TS_USER,
            password=TS_PASSWORD,
            disable_ssl=True,
        )

        # "UserX" does not exist; deleting unknown users is tolerated.
        sync.delete_users(usernames=["User1", "User2", "UserX"])

        auag = sync.get_all_users_and_groups()
        self.assertFalse(auag.has_user("User1"))
        self.assertFalse(auag.has_user("User2"))
        self.assertTrue(auag.has_user('User"3"'))

    def test_delete_one_user(self):
        """Tests deleting a single users."""
        self.create_common_users_and_groups()

        sync = SyncUserAndGroups(
            tsurl=TS_URL,
            username=TS_USER,
            password=TS_PASSWORD,
            disable_ssl=True,
        )

        sync.delete_user(username="User1")

        # Only User1 is removed; the others must remain.
        auag = sync.get_all_users_and_groups()
        self.assertFalse(auag.has_user("User1"))
        self.assertTrue(auag.has_user("User2"))
        self.assertTrue(auag.has_user('User"3"'))

    def test_delete_group_list(self):
        """Tests deleting of a list of groups."""
        self.create_common_users_and_groups()

        sync = SyncUserAndGroups(
            tsurl=TS_URL,
            username=TS_USER,
            password=TS_PASSWORD,
            disable_ssl=True,
        )

        # "GroupX" does not exist; deleting unknown groups is tolerated.
        sync.delete_groups(groupnames=["Group1", "Group2", "GroupX"])

        auag = sync.get_all_users_and_groups()
        self.assertFalse(auag.has_group("Group1"))
        self.assertFalse(auag.has_group("Group2"))
        self.assertTrue(auag.has_group('Group"3"'))

    def test_delete_one_group(self):
        """Tests deleting a single groups."""
        self.create_common_users_and_groups()

        sync = SyncUserAndGroups(
            tsurl=TS_URL,
            username=TS_USER,
            password=TS_PASSWORD,
            disable_ssl=True,
        )

        sync.delete_group(groupname="Group1")

        # Only Group1 is removed; the others must remain.
        auag = sync.get_all_users_and_groups()
        self.assertFalse(auag.has_group("Group1"))
        self.assertTrue(auag.has_group("Group2"))
        self.assertTrue(auag.has_group('Group"3"'))
| 33.196532 | 120 | 0.591154 |
bce7dd666a020a2ae8670a108ae53836525c9298 | 275 | py | Python | instagram/users/apps.py | xodus889/instagram | 69b3daf591a6e63ee4edfeb7bacce6276868fa93 | [
"MIT"
] | 1 | 2018-10-17T18:06:10.000Z | 2018-10-17T18:06:10.000Z | instagram/users/apps.py | xodus889/instagram | 69b3daf591a6e63ee4edfeb7bacce6276868fa93 | [
"MIT"
] | null | null | null | instagram/users/apps.py | xodus889/instagram | 69b3daf591a6e63ee4edfeb7bacce6276868fa93 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""

    # Dotted module path of the app and its human-readable admin name.
    name = 'instagram.users'
    verbose_name = "Users"

    def ready(self):
        """Override this to put in:
            Users system checks
            Users signal registration
        """
        # Intentionally empty: hook point for future checks/signal wiring.
        pass
| 19.642857 | 37 | 0.596364 |
715e66d82c7ba44a94cff229dab27e4733b509bf | 2,276 | py | Python | homework-6-s21-malwake-git/problems.py | malwake-git/ECE20875 | 2348f638088359af962bc0d98e965c1ec0132686 | [
"Apache-2.0"
] | null | null | null | homework-6-s21-malwake-git/problems.py | malwake-git/ECE20875 | 2348f638088359af962bc0d98e965c1ec0132686 | [
"Apache-2.0"
] | null | null | null | homework-6-s21-malwake-git/problems.py | malwake-git/ECE20875 | 2348f638088359af962bc0d98e965c1ec0132686 | [
"Apache-2.0"
] | null | null | null | import re
def problem1(searchstring):
    """
    Match phone numbers.

    Accepts forms such as '765-494-4600', '(765) 494-4600' and '494-4600',
    anchored at the start of the string.

    :param searchstring: string
    :return: True or False
    """
    phone_pattern = re.compile(r'^(\S?\d+\W?)\W?(\d+)\-(\d+)')
    return phone_pattern.search(searchstring) is not None
def problem2(searchstring):
    """
    Extract street name from address.

    The street name is the text between the street number (the last run of
    digits) and the street-type suffix (Ave, St, Rd or Dr).

    :param searchstring: string
    :return: string
    """
    # Take the text after the last run of digits (the street number) ...
    after_number = re.split('\d+', searchstring)[-1]
    # ... then capture everything up to the street-type keyword (lookahead,
    # so the keyword itself is not consumed).
    match = re.search(r'(.*)\s(?=(Ave|St|Rd|Dr))', after_number)
    # Strip the whitespace the capture carries; the previous implementation
    # split/joined on spaces and returned a trailing space in the result.
    return match.group(1).strip()
def problem3(searchstring):
    """
    Garble Street name.

    Reverses the street-name portion of the address in place (e.g.
    'Northwestern' -> 'nretsewhtroN'), keeping the street number, and
    re-appends the words that followed the street name.

    :param searchstring: string
    :return: string
    """
    # Text after the last street number (last run of digits).
    x = re.split('\d+',searchstring)[-1]
    #print(x)
    # The street name: from the leading space up to (but not including)
    # the street-type keyword, thanks to the lookahead.
    str_search = re.search('(\s.*)\s(?=(Ave|St|Rd|Dr))',x)
    group1 = str_search.group();
    group2 = group1
    part = str(group2[::-1]); # reverse
    # Words that appear after the street name: set difference drops the
    # name's own words. NOTE(review): this also loses duplicates and the
    # original word order — sorted() only makes the order deterministic.
    x1 = set(x.split(" "))
    g1 = set(group1.split(" "))
    #last = []
    #print(x1)
    #print(g1)
    #for i in g1:
    #    for j in x1:
    #        if i != j:
    last = sorted(x1.difference(g1))
    #continue
    #last = x.split(" ")[2:]
    last = " ".join(last)
    # Splice the reversed name back into the full address, then re-append
    # the suffix words that the replace removed.
    result = searchstring.replace(x,part);
    return(result + last)
    pass
if __name__ == '__main__' :
    # Manual smoke tests; the expected result is noted inline on each call.
    print(problem1('765-494-4600')) #True
    print(problem1(' 765-494-4600 ')) #False
    print(problem1('(765) 494 4600')) #False
    print(problem1('(765) 494-4600')) #True
    print(problem1('494-4600')) #True
    print(problem2('The EE building is at 465 Northwestern Ave.')) #Northwestern
    print(problem2('Meet me at 201 South First St. at noon')) #South First
    #print(problem2('22 What A Wonderful Ave.'))
    #print(problem2('123 Mayb3 Y0u 222 Did not th1nk 333 This Through Rd.'))
    print(problem3('The EE building is at 465 Northwestern Ave.'))
    print(problem3('Meet me at 201 South First St. at noon'))
    #print(problem3('Go West on 999 West St.'))
| 22.989899 | 80 | 0.568102 |
6df2a32d595f4e44dcbf8e0ec337c155e10f6e8d | 1,181 | py | Python | tests/test_imr/test_farms.py | pnsaevik/imr_farms | a92b3540d20f32475b7a85894e1bd737706a0a2c | [
"MIT"
] | null | null | null | tests/test_imr/test_farms.py | pnsaevik/imr_farms | a92b3540d20f32475b7a85894e1bd737706a0a2c | [
"MIT"
] | null | null | null | tests/test_imr/test_farms.py | pnsaevik/imr_farms | a92b3540d20f32475b7a85894e1bd737706a0a2c | [
"MIT"
] | null | null | null | import pytest
from imr.maps import farms
class Test_locations:
    @pytest.fixture(scope='class')
    def locations(self):
        # Open the IMR farm-locations dataset once for the whole test class;
        # requires the underlying data source to be reachable.
        with farms.locations() as dset:
            yield dset

    def test_correct_name_and_location(self, locations):
        # Record 23015 is a known entry; name and exact coordinates pinned.
        a = locations.sel(record=23015)
        assert a.navn.values.item().decode('utf8') == 'FLØDEVIGEN'
        assert a.lat.values.item() == 58.424515
        assert a.lon.values.item() == 8.756882
class Test_areas:
    @pytest.fixture(scope='class')
    def areas(self):
        # Open the IMR farm-areas dataset once for the whole test class.
        with farms.areas() as dset:
            yield dset

    def test_correct_name_and_location(self, areas):
        # Record 11488 is a known entry; the name and the exact UTM-33N
        # polygon WKT are pinned byte-for-byte.
        a = areas.sel(record=11488)
        assert a.navn.values.item().decode('utf8') == 'BRATTAVIKA'
        assert a.ogc_wkt.values.item().decode('utf8') == (
            'POLYGON ((-38902.007508 6695649.481201,'
            '-38956.470111 6696165.972515,-38926.039136 6696244.396389,'
            '-38897.917527 6696249.609714,-38744.108817 6696181.643866,'
            '-38690.019275 6695668.944893,-38902.007508 6695649.481201))')
        assert a.transverse_mercator.spatial_ref.startswith(
            'PROJCS["WGS 84 / UTM zone 33N"')
db83255c83c8c1f9f6cc6b3c241faf71adcd4fd0 | 1,076 | py | Python | manager.py | herryben/bcc-minitor | 3b9b13550adbad2edff450c5e37e3482d8fd63f5 | [
"MIT"
] | null | null | null | manager.py | herryben/bcc-minitor | 3b9b13550adbad2edff450c5e37e3482d8fd63f5 | [
"MIT"
] | null | null | null | manager.py | herryben/bcc-minitor | 3b9b13550adbad2edff450c5e37e3482d8fd63f5 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#-*- coding: UTF-8 -*-
import requests
import json
import re
import pickle
from common.emailhelper import MailHelper
from config import app_config
def get_yun_bi_market_info():
    """Scrape the BCC/CNY ticker fragment from the Yunbi markets page.

    Returns a dict rebuilt from the inline "bcccny" JSON embedded in the
    page HTML. The capture starts at the "low" key and runs to the closing
    braces before "gon.asks", so further keys from the page (presumably
    "last" etc.) are included — confirm against the live markup.
    """
    r = requests.get('https://yunbi.com/markets/bcccny')
    s = r.text
    rgx = re.compile(r'''"bcccny":{"name":"BCC/CNY","base_unit":"bcc","quote_unit":"cny","low":(.*?)}};gon.asks''')
    m = rgx.search(s)
    # SECURITY: eval() on text scraped from a remote page executes arbitrary
    # code if the page changes or is hostile — consider json.loads instead.
    return eval('{"low":'+m.group(1)+'}')
# Load the previously-seen price state. Best effort: a missing or corrupt
# state file simply means an empty starting state.
data = {}
try:
    with open(app_config['data_path'], 'r') as f:
        # SECURITY: unpickling a file an attacker can write executes code;
        # json would suffice for this simple {'last_buy': float} state.
        data = pickle.load(f)
except Exception as e:
    pass
# print data, 'data'
result = get_yun_bi_market_info()
# NOTE(review): assumes the scraped fragment contains a 'last' key — the
# regex only guarantees 'low'; confirm against the live page.
buy = float(result['last'])
print buy, 'buy', result
# E-mail an alert only when the price moved more than the configured delta
# since the last run.
if abs( buy - data.get('last_buy', 0)) > app_config['buy_price_delta']:
    for k, v in app_config['email'].items():
        MailHelper.send_email(subject='the current buy price is %s' % buy,
                              content='the bbc data is %s' % result, from_addr='18647246574@163.com',
                              to_addr=v, nick_name='Herry')
# Persist the latest price for the next run.
data['last_buy'] = buy
with open(app_config['data_path'], 'w') as f:
    pickle.dump(data, f)
| 28.315789 | 115 | 0.658922 |
c4ed28daad61fb66ea36937b9317e1701e7aeacf | 2,136 | py | Python | examples/slicer_with_1_plus_2_views.py | rafmudaf/dash-slicer | e959f1ea94f3bb1d061acd3f18727227a08144ed | [
"MIT"
] | 18 | 2020-12-16T09:49:00.000Z | 2022-03-18T11:38:32.000Z | examples/slicer_with_1_plus_2_views.py | rafmudaf/dash-slicer | e959f1ea94f3bb1d061acd3f18727227a08144ed | [
"MIT"
] | 43 | 2020-11-05T13:54:34.000Z | 2022-01-14T08:45:54.000Z | examples/slicer_with_1_plus_2_views.py | rafmudaf/dash-slicer | e959f1ea94f3bb1d061acd3f18727227a08144ed | [
"MIT"
] | 3 | 2020-12-15T21:20:36.000Z | 2021-07-26T17:40:02.000Z | """
An example with two slicers at the same axis, and one on another axis.
This demonstrates how multiple indicators can be shown per axis.
Sharing the same scene_id is enough for the slicers to show each-others
position. If the same volume object would be given, it works by default,
because the default scene_id is a hash of the volume object. Specifying
a scene_id provides slice position indicators even when slicing through
different volumes.
Further, this example has one slider showing data with different spacing.
Note how the indicators represent the actual position in "scene coordinates".
"""
import dash
import dash_html_components as html
from dash_slicer import VolumeSlicer
import imageio
app = dash.Dash(__name__, update_title=None)

# Full-resolution volume plus a downsampled copy (every 3rd slice on axis 0,
# every 2nd row on axis 1) shown with compensating spacing so both map to
# the same physical scene coordinates.
vol1 = imageio.volread("imageio:stent.npz")
vol2 = vol1[::3, ::2, :]
spacing = 3, 2, 1
ori = 1000, 2000, 3000

# All three slicers share scene_id so each shows the others' position
# indicators, even though slicer3 slices a different volume.
slicer1 = VolumeSlicer(app, vol1, axis=1, origin=ori, scene_id="scene1", color="red")
slicer2 = VolumeSlicer(app, vol1, axis=0, origin=ori, scene_id="scene1", color="green")
slicer3 = VolumeSlicer(
    app, vol2, axis=0, origin=ori, spacing=spacing, scene_id="scene1", color="blue"
)

# 2x2 CSS grid: coronal view top-left, the two transversal views stacked
# on the right column (the empty Div fills the bottom-left cell).
app.layout = html.Div(
    style={
        "display": "grid",
        "gridTemplateColumns": "40% 40%",
    },
    children=[
        html.Div(
            [
                html.H1("Coronal"),
                slicer1.graph,
                slicer1.slider,
                *slicer1.stores,
            ]
        ),
        html.Div(
            [
                html.H1("Transversal 1"),
                slicer2.graph,
                slicer2.slider,
                *slicer2.stores,
            ]
        ),
        html.Div(),
        html.Div(
            [
                html.H1("Transversal 2"),
                slicer3.graph,
                slicer3.slider,
                *slicer3.stores,
            ]
        ),
    ],
)


if __name__ == "__main__":
    # Note: dev_tools_props_check negatively affects the performance of VolumeSlicer
    app.run_server(debug=True, dev_tools_props_check=False)
| 28.48 | 88 | 0.595037 |
fabf3a8b721a4d13deec71303d01919ee0d079d2 | 6,575 | py | Python | pytorch_lightning/trainer/logging.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | 1 | 2021-03-10T20:13:50.000Z | 2021-03-10T20:13:50.000Z | pytorch_lightning/trainer/logging.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/logging.py | GimmickNG/pytorch-lightning | b36c5e86d014671b0fa922d750b27420bc73b6f9 | [
"Apache-2.0"
] | 1 | 2020-09-11T22:53:18.000Z | 2020-09-11T22:53:18.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Union, Iterable
import torch
from pytorch_lightning.core import memory
from pytorch_lightning.loggers import TensorBoardLogger, LightningLoggerBase, LoggerCollection
from pytorch_lightning.utilities.memory import recursive_detach
class TrainerLoggingMixin(ABC):
    """Trainer mixin: logger configuration and step-output post-processing."""

    # this is just a summary on variables used in this abstract class,
    #  the proper values/initialisation should be done in child class
    current_epoch: int
    on_gpu: bool
    log_gpu_memory: ...
    logger: Union[LightningLoggerBase, bool]
    global_step: int
    global_rank: int
    use_dp: bool
    use_ddp2: bool
    default_root_dir: str
    slurm_job_id: int
    num_gpus: int
    logged_metrics: ...

    def configure_logger(self, logger):
        """Resolve the user-supplied ``logger`` flag/object into ``self.logger``.

        True -> default TensorBoardLogger; False -> no logger; an iterable
        of loggers -> LoggerCollection; anything else is used as-is.
        """
        if logger is True:
            # default logger
            self.logger = TensorBoardLogger(
                save_dir=self.default_root_dir,
                version=self.slurm_job_id,
                name='lightning_logs'
            )
        elif logger is False:
            self.logger = None
        else:
            if isinstance(logger, Iterable):
                self.logger = LoggerCollection(logger)
            else:
                self.logger = logger

    def metrics_to_scalars(self, metrics):
        """Return a copy of ``metrics`` with 0-d tensors converted to Python
        scalars, recursing into nested dicts."""
        new_metrics = {}
        for k, v in metrics.items():
            if isinstance(v, torch.Tensor):
                v = v.item()

            if isinstance(v, dict):
                v = self.metrics_to_scalars(v)

            new_metrics[k] = v

        return new_metrics

    def process_output(self, output, train=False):
        """Reduces output according to the training mode.

        Separates loss from logging and progress bar metrics
        """
        # --------------------------
        # handle single scalar only
        # --------------------------
        # single scalar returned from a xx_step
        if isinstance(output, torch.Tensor):
            progress_bar_metrics = {}
            log_metrics = {}
            callback_metrics = {}
            hiddens = None
            return output, progress_bar_metrics, log_metrics, callback_metrics, hiddens

        # ---------------
        # EXTRACT CALLBACK KEYS
        # ---------------
        # all keys not progress_bar or log are candidates for callbacks
        callback_metrics = {}
        for k, v in output.items():
            if k not in ['progress_bar', 'log', 'hiddens']:
                callback_metrics[k] = v

        if train and (self.use_dp or self.use_ddp2):
            num_gpus = self.num_gpus
            callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)

        # ---------------
        # EXTRACT PROGRESS BAR KEYS
        # ---------------
        # Deliberate best-effort: a missing 'progress_bar' key (or a
        # non-dict output) simply yields no progress-bar metrics.
        try:
            progress_output = output['progress_bar']

            # reduce progress metrics for progress bar when using dp
            if train and (self.use_dp or self.use_ddp2):
                num_gpus = self.num_gpus
                progress_output = self.reduce_distributed_output(progress_output, num_gpus)

            progress_bar_metrics = progress_output
        except Exception:
            progress_bar_metrics = {}

        # ---------------
        # EXTRACT LOGGING KEYS
        # ---------------
        # extract metrics to log to experiment
        # Same best-effort pattern as the progress-bar extraction above.
        try:
            log_output = output['log']

            # reduce progress metrics for progress bar when using dp
            if train and (self.use_dp or self.use_ddp2):
                num_gpus = self.num_gpus
                log_output = self.reduce_distributed_output(log_output, num_gpus)

            log_metrics = log_output
        except Exception:
            log_metrics = {}

        # ---------------
        # EXTRACT LOSS
        # ---------------
        # if output dict doesn't have the keyword loss
        # then assume the output=loss if scalar
        loss = None
        if train:
            try:
                loss = output['loss']
            except Exception:
                if isinstance(output, torch.Tensor):
                    loss = output
                else:
                    raise RuntimeError(
                        'No `loss` value in the dictionary returned from `model.training_step()`.'
                    )

            # when using dp need to reduce the loss
            if self.use_dp or self.use_ddp2:
                loss = self.reduce_distributed_output(loss, self.num_gpus)

        # ---------------
        # EXTRACT HIDDEN
        # ---------------
        hiddens = output.get('hiddens')

        # use every metric passed in as a candidate for callback
        callback_metrics.update(progress_bar_metrics)
        callback_metrics.update(log_metrics)

        # detach all metrics for callbacks to prevent memory leaks
        # no .item() because it will slow things down
        callback_metrics = recursive_detach(callback_metrics)

        return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens

    def reduce_distributed_output(self, output, num_gpus):
        """Average the per-GPU outputs produced by DP/DDP2.

        Tensors are mean-reduced, lists are arithmetic-averaged, nested
        dicts are reduced recursively; no-op when ``num_gpus <= 1``.
        """
        if num_gpus <= 1:
            return output

        # when using DP, we get one output per gpu
        # average outputs and return
        if isinstance(output, torch.Tensor):
            return output.mean()

        for k, v in output.items():
            # recurse on nested dics
            if isinstance(output[k], dict):
                output[k] = self.reduce_distributed_output(output[k], num_gpus)

            # compute the average of scalars
            elif isinstance(output[k], list):
                output[k] = sum(output[k]) / len(output[k])

            # do nothing when there's a scalar
            elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
                pass

            # do not reduce metrics that have batch size > num gpus
            elif output[k].size(0) <= num_gpus:
                output[k] = torch.mean(output[k])

        return output
| 33.891753 | 98 | 0.582662 |
646dadcfa4c4a4376475989afe66d4d884d91cf9 | 416 | py | Python | app/core/migrations/0008_auto_20200105_1716.py | Raysultan/roscosmos-stats | 8931ee824c4e4cd67ae4f86ce221515b00d9e872 | [
"MIT"
] | 5 | 2020-11-24T09:57:36.000Z | 2021-11-17T08:02:29.000Z | app/core/migrations/0008_auto_20200105_1716.py | raisultan/roscosmos-api | 8931ee824c4e4cd67ae4f86ce221515b00d9e872 | [
"MIT"
] | null | null | null | app/core/migrations/0008_auto_20200105_1716.py | raisultan/roscosmos-api | 8931ee824c4e4cd67ae4f86ce221515b00d9e872 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-01-05 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20200104_1358'),
]
operations = [
migrations.AlterField(
model_name='launchvehicle',
name='space_tugs',
field=models.ManyToManyField(null=True, to='core.SpaceTug'),
),
]
| 21.894737 | 72 | 0.612981 |
82ade38974e40cd46dd2f0bf2e37e99e412dd7e2 | 7,050 | py | Python | PaddleNLP/pretrain_language_models/BERT/optimization.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 6 | 2021-06-08T13:19:35.000Z | 2021-06-24T15:08:54.000Z | legacy/pretrain_language_models/BERT/optimization.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | 1 | 2021-11-22T08:11:08.000Z | 2021-11-22T08:11:08.000Z | legacy/pretrain_language_models/BERT/optimization.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | 4 | 2021-08-23T07:46:06.000Z | 2021-09-23T08:37:03.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from utils.fp16 import create_master_params_grads, master_param_to_train_param, apply_dynamic_loss_scaling
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0."""
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistent scalar variable that holds the current LR value.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")

        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()

        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: ramp linearly from 0 up to `learning_rate`.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear (power=1.0) polynomial decay down to 0
                # over the full training run.
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)

        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_fp16=False,
                 use_dynamic_loss_scaling=False,
                 init_loss_scaling=1.0,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8):
    """Build the Adam training procedure for ``loss``.

    Applies the selected LR schedule ('noam_decay' or 'linear_warmup_decay'),
    global-norm gradient clipping, optional fp16 training with (dynamic)
    loss scaling, and decoupled weight decay that skips LayerNorm and bias
    parameters.

    Returns:
        (scheduled_lr, loss_scaling): the learning-rate variable actually
        used and the loss-scaling variable (None unless use_fp16).
    """
    scheduled_lr, loss_scaling = None, None
    if scheduler == 'noam_decay':
        if warmup_steps > 0:
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        else:
            print(
                "WARNING: noam decay of learning rate should have postive warmup "
                "steps but given {}, using constant learning rate instead!"
                .format(warmup_steps))
            scheduled_lr = fluid.layers.create_global_var(
                name=fluid.unique_name.generate("learning_rate"),
                shape=[1],
                value=learning_rate,
                dtype='float32',
                persistable=True)
    elif scheduler == 'linear_warmup_decay':
        if warmup_steps > 0:
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            print(
                "WARNING: linear warmup decay of learning rate should have "
                "postive warmup steps but given {}, use constant learning rate "
                "instead!".format(warmup_steps))
            scheduled_lr = fluid.layers.create_global_var(
                name=fluid.unique_name.generate("learning_rate"),
                shape=[1],
                value=learning_rate,
                dtype='float32',
                persistable=True)
    else:
        raise ValueError("Unkown learning rate scheduler, should be "
                         "'noam_decay' or 'linear_warmup_decay'")

    optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))

    def exclude_from_weight_decay(param):
        """True for params (LayerNorm weights, biases) exempt from decay."""
        # Remove the exact fp16 master-weight suffix before matching.
        # BUGFIX: the previous `name.rstrip(".master")` stripped any trailing
        # characters from the set {., m, a, s, t, e, r}, mangling names that
        # merely end in those letters.
        name = param.name
        if name.endswith(".master"):
            name = name[:-len(".master")]
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False

    param_list = dict()

    if use_fp16:
        # Scale the loss so fp16 gradients don't underflow; gradients are
        # computed on the scaled loss against fp32 master weights.
        loss_scaling = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("loss_scaling"),
            shape=[1],
            value=init_loss_scaling,
            dtype='float32',
            persistable=True)
        loss *= loss_scaling

        param_grads = optimizer.backward(loss)
        master_param_grads = create_master_params_grads(
            param_grads, train_program, startup_prog, loss_scaling)

        # Snapshot parameter values before the Adam update so weight decay
        # is applied against the pre-update weights (decoupled decay).
        if weight_decay > 0:
            for param, _ in master_param_grads:
                param_list[param.name] = param * 1.0
                param_list[param.name].stop_gradient = True

        if use_dynamic_loss_scaling:
            apply_dynamic_loss_scaling(
                loss_scaling, master_param_grads, incr_every_n_steps,
                decr_every_n_nan_or_inf, incr_ratio, decr_ratio)

        optimizer.apply_gradients(master_param_grads)

        if weight_decay > 0:
            for param, grad in master_param_grads:
                if exclude_from_weight_decay(param):
                    continue
                with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                    updated_param = param - param_list[
                        param.name] * weight_decay * scheduled_lr
                    fluid.layers.assign(output=param, input=updated_param)

        # Copy the updated fp32 master weights back into the fp16 params.
        master_param_to_train_param(master_param_grads, param_grads,
                                    train_program)

    else:
        # Same decoupled weight decay as above, without the fp16 machinery.
        if weight_decay > 0:
            for param in train_program.all_parameters():
                param_list[param.name] = param * 1.0
                param_list[param.name].stop_gradient = True

        _, param_grads = optimizer.minimize(loss)

        if weight_decay > 0:
            for param, grad in param_grads:
                if exclude_from_weight_decay(param):
                    continue
                with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                    updated_param = param - param_list[
                        param.name] * weight_decay * scheduled_lr
                    fluid.layers.assign(output=param, input=updated_param)

    return scheduled_lr, loss_scaling
| 39.606742 | 106 | 0.603121 |
89e99878b1c9b7c8f91ea887ec09119b70a47a56 | 1,420 | py | Python | data/external/repositories_2to3/103259/Kaggle_The_Hunt_for_Prohibited_Content-master/Python/generate_weighted_sample.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/103259/Kaggle_The_Hunt_for_Prohibited_Content-master/Python/generate_weighted_sample.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/103259/Kaggle_The_Hunt_for_Prohibited_Content-master/Python/generate_weighted_sample.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | # -*- coding: UTF-8 -*-
"""
Python version: 2.7.6
Version: 1.0 at Sep 01 2014
Author: Chenglong Chen < yr@Kaggle >
Email: c.chenglong@gmail.com
"""
import re
import argparse
def getWeightedVWFile(vwFileTrain, vwWeightedFileTrain, posWeight, negWeight):
    """Add per-example importance weights to a Vowpal Wabbit training file.

    Each input line must start with its label ("1" or "-1") followed by
    " '<tag>". The given weight is inserted between the label and the tag,
    which is VW's importance-weight syntax.

    Args:
        vwFileTrain: path to the unweighted VW-format input file.
        vwWeightedFileTrain: path the weighted output is written to.
        posWeight: weight for positive (label == 1) samples.
        negWeight: weight for negative samples.
    """
    print(" Weight for positive samples: %s" % posWeight)
    print(" Weight for negative samples: %s" % negWeight)
    # Compiled once, outside the per-line loop: label at start of line,
    # immediately before the " '<tag>" marker.
    label_re = re.compile(r"(^-?[0-9]) '")
    # now we write to files
    # Text mode (not "rb"/"wb") so the str regex and slicing also work on
    # Python 3; the reader is now closed via the context manager too
    # (previously the input handle was never closed).
    with open(vwWeightedFileTrain, "w") as weightedWriter, \
            open(vwFileTrain, "r") as reader:
        for e, line in enumerate(reader):
            # get the label
            label = int(label_re.search(line).group(1))
            if label == 1:
                # "1 ..." -> "1 <w> ..."
                newLine = line[0] + " " + str(posWeight) + line[1:]
            else:
                # "-1 ..." -> "-1 <w> ..."
                newLine = line[:2] + " " + str(negWeight) + line[2:]
            weightedWriter.write(newLine)
            # Progress indicator for very large training files.
            if (e + 1) % 1000000 == 0:
                print(" Wrote %s" % (e + 1))
if __name__ == "__main__":
    # CLI: input/output file paths plus the two importance weights.
    # Note the weights arrive as strings; they are only ever str()-formatted
    # into the output, so no numeric conversion is needed.
    parser = argparse.ArgumentParser(description="generate weighted samples")
    parser.add_argument("input")
    parser.add_argument("output")
    parser.add_argument("posWeight")
    parser.add_argument("negWeight")
    args = parser.parse_args()
    getWeightedVWFile(args.input, args.output,
                      args.posWeight, args.negWeight)
980a4934525325196216b42827d5c1164a6d0ba7 | 987 | py | Python | u2net_portrait-keras.py | Voinic/u2net-keras | 857b465940ef098d3abc2648d2454832065cc132 | [
"MIT"
] | 5 | 2021-02-25T19:39:07.000Z | 2021-09-14T14:46:52.000Z | u2net_portrait-keras.py | Voinic/u2net-keras | 857b465940ef098d3abc2648d2454832065cc132 | [
"MIT"
] | null | null | null | u2net_portrait-keras.py | Voinic/u2net-keras | 857b465940ef098d3abc2648d2454832065cc132 | [
"MIT"
] | null | null | null | from keras.models import load_model
import cv2
import numpy as np
import sys

# CLI: python u2net_portrait-keras.py <input_image> <output_image>
# Renamed from `input`/`output`, which shadowed the builtins.
input_path = sys.argv[1]
output_path = sys.argv[2]

MODEL_PATH = './u2net_portrait_keras.h5'

# load model (compile=False: inference only, no optimizer state needed)
u2netp_keras = load_model(MODEL_PATH, compile=False)

# load image (cv2 reads BGR channel order)
image = cv2.imread(input_path)

# Resize to the fixed 512x512 network input and normalize per channel with
# the constants this converted model was trained with; the BGR->RGB swap
# happens here via the reversed channel indices.
resized = cv2.resize(image, (512, 512), interpolation=cv2.INTER_CUBIC)
tmpImg = np.zeros((512, 512, 3))
resized = resized / np.max(resized)
tmpImg[:, :, 0] = (resized[:, :, 2] - 0.406) / 0.225
tmpImg[:, :, 1] = (resized[:, :, 1] - 0.456) / 0.224
tmpImg[:, :, 2] = (resized[:, :, 0] - 0.485) / 0.229

# HWC -> CHW, then add the batch dimension (the old comment said
# "convert BGR to RGB", but this transpose is the layout change).
tmpImg = tmpImg.transpose((2, 0, 1))
tmpImg = tmpImg[np.newaxis, :, :, :]

# The network returns seven side outputs; d1 is the fused final prediction.
d1, d2, d3, d4, d5, d6, d7 = u2netp_keras.predict(tmpImg)
pred = np.array(1.0 - d1[:, 0, :, :])[0]

# Min-max normalize to [0, 1], then scale to 8-bit grayscale.
ma = np.max(pred)
mi = np.min(pred)
pred = (pred - mi) / (ma - mi)
pred = pred.squeeze()
pred = (pred * 255).astype(np.uint8)

# Resize back to the source image size (shape[1::-1] == (width, height)).
out = cv2.resize(pred, image.shape[1::-1], interpolation=cv2.INTER_CUBIC)

# save image
cv2.imwrite(output_path, out)
64ec6e59e9cd04839c4b2d75000d618984364fb9 | 1,394 | py | Python | minos/cli/api/new.py | Clariteia/minos | 49dc01dfd7972b192ce84636c11c6b9255f19d0f | [
"MIT"
] | 7 | 2022-01-29T11:51:55.000Z | 2022-03-05T12:22:11.000Z | minos/cli/api/new.py | minos-framework/minos-cli | 49dc01dfd7972b192ce84636c11c6b9255f19d0f | [
"MIT"
] | 35 | 2022-01-31T10:27:49.000Z | 2022-03-16T15:22:56.000Z | minos/cli/api/new.py | Clariteia/minos | 49dc01dfd7972b192ce84636c11c6b9255f19d0f | [
"MIT"
] | 1 | 2022-03-03T10:32:44.000Z | 2022-03-03T10:32:44.000Z | from pathlib import (
Path,
)
import typer
from ..consoles import (
console,
)
from ..pathlib import (
get_microservices_directory,
)
from ..templating import (
TemplateFetcher,
TemplateProcessor,
)
# Typer application that hosts the commands defined below; shell completion
# is disabled for this small CLI.
app = typer.Typer(add_completion=False)
@app.command("project")
def new_project(path: Path) -> None:
    """Initialize a project on the given directory."""
    console.print(":wrench: Creating new Project...\n")
    # Render the "project-init" scaffold into the target directory, seeding
    # the template context with the directory name as the project name.
    context = {"project_name": path.name}
    template = TemplateFetcher.from_name("project-init")
    renderer = TemplateProcessor.from_fetcher(template, path.absolute(), defaults=context)
    renderer.render()
@app.command("microservice")
def new_microservice(name: str) -> None:
    """Initialize a microservice on the given directory."""
    console.print(":wrench: Creating new Microservice...\n")
    # Locate the project's microservices folder; a ValueError means the
    # current working directory is not inside a Minos project.
    try:
        target = get_microservices_directory(Path.cwd()) / name
    except ValueError:
        console.print("No Minos project found. Consider using 'minos new project'")
        raise typer.Exit(code=1)
    # Render the "microservice-init" scaffold into the new folder.
    template = TemplateFetcher.from_name("microservice-init")
    TemplateProcessor.from_fetcher(template, target, defaults={"name": name}).render()
    # Drop the docker-compose build marker that ships with the scaffold.
    (target / ".build_docker_compose.txt").unlink()
# Typer group callback: its docstring becomes the help text for this command
# group; the function body is intentionally empty.
@app.callback()
def callback():
    """Creates a new project or microservice in a given path"""
| 25.814815 | 110 | 0.705882 |
11f5474cf06aa06243fdab79a0081811a1acd30c | 9,798 | py | Python | 002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson48/ArcBall.py | lhl/vrdev | fc1a9af2b51d159c99c8779349ef3392a70ed9ed | [
"Apache-2.0"
] | 12 | 2015-12-02T02:36:36.000Z | 2020-09-20T17:14:24.000Z | 002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson48/ArcBall.py | lhl/vrdev | fc1a9af2b51d159c99c8779349ef3392a70ed9ed | [
"Apache-2.0"
] | null | null | null | 002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson48/ArcBall.py | lhl/vrdev | fc1a9af2b51d159c99c8779349ef3392a70ed9ed | [
"Apache-2.0"
] | 8 | 2016-11-02T11:17:04.000Z | 2021-10-21T07:42:19.000Z | """
ArcBall.py -- Math utilities, vector, matrix types and ArcBall quaternion rotation class
>>> unit_test_ArcBall_module ()
unit testing ArcBall
Quat for first drag
[ 0.08438914 -0.08534209 -0.06240178 0.99080837]
First transform
[[ 0.97764552 -0.1380603 0.15858325 0. ]
[ 0.10925253 0.97796899 0.17787792 0. ]
[-0.17964739 -0.15657592 0.97119039 0. ]
[ 0. 0. 0. 1. ]]
LastRot at end of first drag
[[ 0.97764552 -0.1380603 0.15858325]
[ 0.10925253 0.97796899 0.17787792]
[-0.17964739 -0.15657592 0.97119039]]
Quat for second drag
[ 0.00710336 0.31832787 0.02679029 0.94757545]
Second transform
[[ 0.88022292 -0.08322023 -0.46720669 0. ]
[ 0.14910145 0.98314685 0.10578787 0. ]
[ 0.45052907 -0.16277808 0.8777966 0. ]
[ 0. 0. 0. 1.00000001]]
"""
# Import numpy (preferred) or the legacy Numeric package under the common
# alias "Numeric", defining a matching sumDot helper; exit the demo if
# neither is available.  (Python 2 syntax: "except ImportError, err".)
try:
    import numpy as Numeric
    def sumDot( a,b ):
        return Numeric.dot (a, b)
except ImportError, err:
    try:
        import Numeric
        # The legacy fallback reduces the dot result with sum().
        def sumDot( a,b ):
            return sum (Numeric.dot (a, b) )
    except ImportError, err:
        print "This demo requires the numpy or Numeric extension, sorry"
        import sys
        sys.exit()
import copy
from math import sqrt
# //assuming IEEE-754(GLfloat), which i believe has max precision of 7 bits
# Threshold below which a drag's rotation axis is treated as zero-length
# (i.e. no rotation) in ArcBallT.drag().
Epsilon = 1.0e-5
class ArcBallT:
    """Arcball rotation controller.

    Maps 2D window coordinates onto a virtual unit sphere so that a mouse
    press (click) plus a subsequent move (drag) can be converted into a
    quaternion describing the rotation between the two sphere points.
    """

    def __init__ (self, NewWidth, NewHeight):
        # Drag start/end points on the arcball sphere.
        self.m_StVec = Vector3fT ()
        self.m_EnVec = Vector3fT ()
        # Pixel -> [-1, 1] scale factors; properly initialised by setBounds().
        self.m_AdjustWidth = 1.0
        self.m_AdjustHeight = 1.0
        self.setBounds (NewWidth, NewHeight)

    def __str__ (self):
        # Human-readable dump of the controller state (handy for debugging).
        str_rep = ""
        str_rep += "StVec = " + str (self.m_StVec)
        str_rep += "\nEnVec = " + str (self.m_EnVec)
        str_rep += "\n scale coords %f %f" % (self.m_AdjustWidth, self.m_AdjustHeight)
        return str_rep

    def setBounds (self, NewWidth, NewHeight):
        """Set the window size that incoming mouse coordinates refer to."""
        assert (NewWidth > 1.0 and NewHeight > 1.0), "Invalid width or height for bounds."
        # Scale factors mapping a pixel coordinate into the [-1, 1] range.
        self.m_AdjustWidth = 1.0 / ((NewWidth - 1.0) * 0.5)
        self.m_AdjustHeight = 1.0 / ((NewHeight - 1.0) * 0.5)

    def _mapToSphere (self, NewPt):
        """Map window coordinate NewPt (Point2fT) onto the unit sphere.

        Returns a new Vector3fT: a point on the sphere surface when the
        cursor lies outside the ball's silhouette, otherwise a point lifted
        onto the front of the sphere.
        """
        X = 0
        Y = 1
        Z = 2
        NewVec = Vector3fT ()
        # Copy the parameter into a temp point; NewPt itself is not modified.
        TempPt = copy.copy (NewPt)
        # BUGFIX: removed a leftover debug statement (print 'NewPt', ...)
        # whose output is not part of the expected doctest transcript in the
        # module docstring, so it made the doctest fail.
        # Adjust point coords and scale down to range of [-1 ... 1].
        TempPt [X] = (NewPt [X] * self.m_AdjustWidth) - 1.0
        TempPt [Y] = 1.0 - (NewPt [Y] * self.m_AdjustHeight)
        # Squared distance of the mapped point from the sphere centre.
        length = sumDot( TempPt, TempPt)
        if (length > 1.0):
            # Outside the sphere: project back onto the silhouette (z = 0).
            norm = 1.0 / sqrt (length)
            NewVec [X] = TempPt [X] * norm
            NewVec [Y] = TempPt [Y] * norm
            NewVec [Z] = 0.0
        else:
            # Inside the sphere: lift onto the surface with
            # z = sqrt(1 - x^2 - y^2).
            NewVec [X] = TempPt [X]
            NewVec [Y] = TempPt [Y]
            NewVec [Z] = sqrt (1.0 - length)
        return NewVec

    def click (self, NewPt):
        """Mouse down (Point2fT): remember where the drag starts."""
        self.m_StVec = self._mapToSphere (NewPt)
        return

    def drag (self, NewPt):
        """ drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
        """
        X = 0
        Y = 1
        Z = 2
        W = 3
        self.m_EnVec = self._mapToSphere (NewPt)
        # Rotation axis: the vector perpendicular to start and end vectors.
        Perp = Vector3fCross(self.m_StVec, self.m_EnVec)
        NewRot = Quat4fT ()
        if (Vector3fLength(Perp) > Epsilon):
            # Non-degenerate drag: axis goes in x/y/z, and w is
            # cos(theta / 2), obtained from the start/end dot product.
            NewRot[X] = Perp[X]
            NewRot[Y] = Perp[Y]
            NewRot[Z] = Perp[Z]
            NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec)
        else:
            # Begin and end vectors coincide: return the zero quaternion
            # (no rotation).  BUGFIX: the original assigned NewRot.X/.Y/.Z/.W,
            # which raises AttributeError on a numpy array; use index
            # assignment instead.
            NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0
        return NewRot
# ##################### Math utility ##########################################
def Matrix4fT ():
    # Fresh 4x4 single-precision identity matrix.
    return Numeric.eye (4, dtype='f')
def Matrix3fT ():
    # Fresh 3x3 single-precision identity matrix.
    return Numeric.eye (3, dtype='f')
def Quat4fT ():
    # Quaternion as a zeroed single-precision 4-vector, laid out [x, y, z, w].
    return Numeric.zeros ((4,), dtype='f')
def Vector3fT ():
    # Zeroed single-precision 3-vector.
    return Numeric.zeros ((3,), dtype='f')
def Point2fT (x = 0.0, y = 0.0):
    # 2D window coordinate packed into a single-precision array.
    return Numeric.array ([x, y], dtype='f')
def Vector3fDot(u, v):
    # Inner (dot) product of two 3-vectors.
    return Numeric.dot (u, v)
def Vector3fCross(u, v):
    # Right-handed cross product of two 3-vectors, as a float32 array.
    return Numeric.array (
        [u[1] * v[2] - u[2] * v[1],
         u[2] * v[0] - u[0] * v[2],
         u[0] * v[1] - u[1] * v[0]], dtype='f')
def Vector3fLength (u):
    # Euclidean magnitude: sqrt(u . u), via the module-level sumDot helper.
    return sqrt (sumDot (u, u))
def Matrix3fSetIdentity ():
    # Fresh 3x3 single-precision identity (same contract as Matrix3fT).
    return Numeric.eye (3, dtype='f')
# Matrix product A.B, delegated to the module-level sumDot helper (which is
# numpy.dot when numpy is available).
def Matrix3fMulMatrix3f (matrix_a, matrix_b):
    return sumDot( matrix_a, matrix_b )
def Matrix4fSVD (NewObj):
    # RMS magnitude of the nine entries of the upper-left 3x3 block: a
    # singular-value-style estimate of the matrix's uniform scale factor.
    total = 0.0
    for row in (0, 1, 2):
        for col in (0, 1, 2):
            total = total + NewObj [row][col] * NewObj [row][col]
    return sqrt (total / 3.0)
def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix):
    # In place: overwrite the upper-left 3x3 (rotation/scale) block of the
    # 4x4 matrix with the given 3x3 matrix; the rest is left untouched.
    NewObj[:3, :3] = three_by_three_matrix
    return NewObj
# /**
# * Sets the rotational component (upper 3x3) of this matrix to the matrix
# * values in the T precision Matrix3d argument; the other elements of
# * this matrix are unchanged; a singular value decomposition is performed
# * on this object's upper 3x3 matrix to factor out the scale, then this
# * object's upper 3x3 matrix components are replaced by the passed rotation
# * components, and then the scale is reapplied to the rotational
# * components.
# * @param three_by_three_matrix T precision 3x3 matrix
# */
def Matrix4fSetRotationFromMatrix3f (NewObj, three_by_three_matrix):
    # Replace the rotation block while preserving the matrix's overall scale:
    # measure the current scale, swap in the new 3x3 rotation, then reapply
    # the scale to the whole matrix.
    scale = Matrix4fSVD (NewObj)
    with_rotation = Matrix4fSetRotationScaleFromMatrix3f (NewObj, three_by_three_matrix)
    return with_rotation * scale
def Matrix3fSetRotationFromQuat4f (q1):
    # Converts the H quaternion q1 into a new equivalent 3x3 rotation matrix.
    # Quaternion layout is [x, y, z, w] (w last), as produced by drag().
    X = 0
    Y = 1
    Z = 2
    W = 3
    NewObj = Matrix3fT ()
    # n is the squared magnitude of the quaternion; s normalizes the products
    # below so a non-unit quaternion still yields a pure rotation matrix.
    n = sumDot(q1, q1)
    s = 0.0
    if (n > 0.0):
        s = 2.0 / n
    # Precompute the pairwise products used by the rotation-matrix formula.
    xs = q1 [X] * s; ys = q1 [Y] * s; zs = q1 [Z] * s
    wx = q1 [W] * xs; wy = q1 [W] * ys; wz = q1 [W] * zs
    xx = q1 [X] * xs; xy = q1 [X] * ys; xz = q1 [X] * zs
    yy = q1 [Y] * ys; yz = q1 [Y] * zs; zz = q1 [Z] * zs
    # This math all comes about by way of algebra, complex math, and trig identities.
    # See Lengyel pages 88-92
    NewObj [X][X] = 1.0 - (yy + zz); NewObj [Y][X] = xy - wz; NewObj [Z][X] = xz + wy;
    NewObj [X][Y] = xy + wz; NewObj [Y][Y] = 1.0 - (xx + zz); NewObj [Z][Y] = yz - wx;
    NewObj [X][Z] = xz - wy; NewObj [Y][Z] = yz + wx; NewObj [Z][Z] = 1.0 - (xx + yy)
    return NewObj
def unit_test_ArcBall_module ():
    # Unit testing of the ArcBall calss and the real math behind it.
    # Simulates a click and drag followed by another click and drag.
    # NOTE(review): Python 2 print statements; the expected output of this
    # function is pinned by the doctest transcript in the module docstring.
    print "unit testing ArcBall"
    Transform = Matrix4fT ()
    LastRot = Matrix3fT ()
    ThisRot = Matrix3fT ()
    ArcBall = ArcBallT (640, 480)
    # print "The ArcBall with NO click"
    # print ArcBall
    # First click
    LastRot = copy.copy (ThisRot)
    mouse_pt = Point2fT (500,250)
    ArcBall.click (mouse_pt)
    # print "The ArcBall with first click"
    # print ArcBall
    # First drag
    mouse_pt = Point2fT (475, 275)
    ThisQuat = ArcBall.drag (mouse_pt)
    # print "The ArcBall after first drag"
    # print ArcBall
    # print
    # print
    print "Quat for first drag"
    print ThisQuat
    # Accumulate the drag into the running rotation and the 4x4 transform.
    ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat)
    # Linear Algebra matrix multiplication A = old, B = New : C = A * B
    ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot)
    Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot)
    print "First transform"
    print Transform
    # Done with first drag
    # second click
    LastRot = copy.copy (ThisRot)
    print "LastRot at end of first drag"
    print LastRot
    mouse_pt = Point2fT (350,260)
    ArcBall.click (mouse_pt)
    # second drag
    mouse_pt = Point2fT (450, 260)
    ThisQuat = ArcBall.drag (mouse_pt)
    # print "The ArcBall"
    # print ArcBall
    print "Quat for second drag"
    print ThisQuat
    ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat)
    ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot)
    # print ThisRot
    Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot)
    print "Second transform"
    print Transform
    # Done with second drag
    LastRot = copy.copy (ThisRot)
def _test ():
    # This will run doctest's unit testing capability.
    # see http://www.python.org/doc/current/lib/module-doctest.html
    #
    # doctest introspects the ArcBall module for all docstrings
    # that look like interactive python sessions and invokes
    # the same commands then and there as unit tests to compare
    # the output generated. Very nice for unit testing and
    # documentation.
    # Returns the TestResults (failed, attempted) from doctest.testmod.
    import doctest, ArcBall
    return doctest.testmod (ArcBall)
if __name__ == "__main__":
    # Invoke our function that runs python's doctest unit testing tool.
    _test ()
    # unit_test ()
    # (unit_test_ArcBall_module() can also be run directly for raw output.)
| 30.240741 | 111 | 0.654317 |
8f2adec8b2388a8ba1e95922a961e0f34e2392fc | 1,208 | py | Python | conferencio/speaker/models.py | hotsyk/conferencio | 56ff18aa22a1bd55a42ef740e2190240b29a38c7 | [
"BSD-3-Clause"
] | null | null | null | conferencio/speaker/models.py | hotsyk/conferencio | 56ff18aa22a1bd55a42ef740e2190240b29a38c7 | [
"BSD-3-Clause"
] | 6 | 2015-01-10T11:34:34.000Z | 2015-01-10T11:35:41.000Z | conferencio/speaker/models.py | hotsyk/conferencio | 56ff18aa22a1bd55a42ef740e2190240b29a38c7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from postgres.fields import json_field
from event.models import Event
from users.models import User
class Speaker(models.Model):
    """
    Model to keep speakers data
    """
    # Site user this speaker profile belongs to.
    user = models.ForeignKey(User)
    # Free-form speaker metadata.  BUGFIX: use the callable ``dict`` instead
    # of a ``{}`` literal so every row gets its own default dict rather than
    # all rows sharing one mutable instance.
    data = json_field.JSONField(default=dict)
    # Events this speaker talks at; per-talk details live on EventTalks.
    talks = models.ManyToManyField(Event, through='EventTalks')
    def __unicode__(self):
        return "{0} {1}".format(self.user.first_name, self.user.last_name)
class EventTalks(models.Model):
    """
    Link speakers to event with talk
    """
    # The event hosting the talk and the speaker giving it.
    event = models.ForeignKey(Event)
    speaker = models.ForeignKey(Speaker)
    title = models.CharField(_('Title of the talk'), max_length=1024)
    # BUGFIX: callable ``dict`` instead of a shared mutable ``{}`` default.
    data = json_field.JSONField(default=dict)
    def __unicode__(self):
        return self.title
class Proposal(models.Model):
    """
    Store proposals for talks given for events
    """
    # Event the talk is proposed for, and the user proposing it.
    event = models.ForeignKey(Event)
    proposer = models.ForeignKey(User)
    title = models.CharField(_('Title of the talk'), max_length=1024)
    # BUGFIX: callable ``dict`` instead of a shared mutable ``{}`` default.
    data = json_field.JSONField(default=dict)
    def __unicode__(self):
        return self.title
| 25.702128 | 74 | 0.686258 |
b24c2182c697ba856d1d59fdcff0abf1ed0bd859 | 4,448 | py | Python | WeeklyActivityChrome.py | samratpodder/MakautWeeklyActivityFillFormAutomated | 7ec7200cd73cdfd69fd0e2b6f48dc2e959523f7f | [
"MIT"
] | 3 | 2020-05-03T07:49:50.000Z | 2021-07-02T15:13:08.000Z | WeeklyActivityChrome.py | samratpodder/MakautWeeklyActivityFillFormAutomated | 7ec7200cd73cdfd69fd0e2b6f48dc2e959523f7f | [
"MIT"
] | null | null | null | WeeklyActivityChrome.py | samratpodder/MakautWeeklyActivityFillFormAutomated | 7ec7200cd73cdfd69fd0e2b6f48dc2e959523f7f | [
"MIT"
] | null | null | null | import pandas as pd
from selenium import webdriver
from datetime import datetime, timedelta
from selenium.webdriver.support.ui import Select
import math
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def isnan(value):
    """Return True if *value* can be coerced to float and is NaN.

    Anything that cannot be converted to float at all (None, arbitrary
    strings, ...) is reported as not-NaN rather than raising.
    """
    try:
        return math.isnan(float(value))
    except (TypeError, ValueError):
        # BUGFIX: the original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures mean "not NaN".
        return False
# --- Input data -------------------------------------------------------------
# Path to the exported timetable CSV; the user must edit this before running.
source_file_path = "Replace this text with your csv directory" #<-------------------------------
source_df = pd.read_csv(source_file_path)
# Keep rows 0..433 and the columns from faculty name through the class
# code/link column.
source_df = source_df.loc[0:433,
                          'Faculty Name':'Mention Google Class Code/Link/Meeting ID (If applicable)']
# Keep only rows whose "Stream-Sec" value mentions CSE.
cse_df = source_df[source_df["Stream-Sec"].str.find("CSE") >= 0]
# print(cse_df.shape)
# print(cse_df)
cse_df.to_csv("Filtered CSE Classes.csv")
# --- Browser setup ----------------------------------------------------------
# Launch Chrome through the local chromedriver (path must be edited too).
browser = webdriver.Chrome(executable_path=r'Replace this text with your directory') #<------------------------------
browser.set_page_load_timeout(240)
browser.get('https://makaut1.ucanapply.com/smartexam/public/student/week-report-activity/create')
# student=browser.find_element_by_name('STUDENT')
# student.click()
print("Click on Student :)")
print("After logging in press Enter in console")
# Manual login step: the script blocks here until the user presses Enter.
wait = input()
browser.get("https://makaut1.ucanapply.com/smartexam/public/student/week-report-activity/create")
# weeklyactivity = browser.find_element_by_id('week-report-activity/create')
# weeklyactivity.click()
# Working variables reused by the form-filling loop below.
week = datetime.now()
topic = ""
duration = ""
takenby = ""
platform = ""
date = ""
link = ""
# Number of rows already submitted in a previous run; they will be skipped.
i = int(input("Previous Entries : "))
# Fill one weekly-activity form per filtered CSE class row, pausing for
# manual review before each submission.
for index, row in cse_df.iterrows():
    # Skip rows that were already submitted in a previous run.
    if not i == 0:
        i -= 1
        continue
    browser.get("https://makaut1.ucanapply.com/smartexam/public/student/week-report-activity/create")
    sleep(3)
    # Look up all form fields up front.  NOTE(review): weekobj, dateobj and
    # prof are fetched but never used below; "test" and "self" are unusual
    # variable names but harmless at module level.
    weekobj = browser.find_element_by_id("week")
    topicfld = browser.find_element_by_id("topic_covered")
    platusd = browser.find_element_by_id("platform_used")
    dateobj = browser.find_element_by_id("date_tme")
    leclink = browser.find_element_by_id("record_lecture_upload_link")
    durationform = browser.find_element_by_id("duration_in_min")
    interraction = browser.find_element_by_id("post_class_interraction_note")
    assignmentrec = browser.find_element_by_id("assignment_received")
    assignmentsub = browser.find_element_by_id("assignment_submitted")
    test = browser.find_element_by_id("test_attended_if_any")
    self = browser.find_element_by_id("daily_self_acitvity")
    remark = browser.find_element_by_id("remark")
    # Fixed semester/course drop-down selections for this account.
    sem = Select(browser.find_element_by_id("SEMCODE"))
    sem.select_by_value("SM02")
    sleep(3)
    course = Select(browser.find_element_by_id("COURSECD"))
    course.select_by_value('C000024')
    sleep(3)
    prof = Select(browser.find_element_by_id("class_taken_by"))
    sleep(3)
    # week = datetime.strptime(row[2] , '%mm %dd %yyyy')
    # Pull the row's values by positional column index.
    date = str(row[2]) + "-" + str(row[3])
    topic = row[7]
    duration = row[3]
    takenby = row[0]
    platform = row[9]
    link = row[10]
    code = row[6]
    # pcin = row[12]
    # assignmentR = row[13]
    # assignmentS = row[14]
    # testatt = row[15]
    # Echo the entry so the user can verify it before submission.
    print("Date/Time = " + str(date))
    print("Topic Covered= " + str(topic))
    print("Course Code = " + str(code))
    print("Time= " + str(duration))
    print("Taken by " + str(takenby))
    print("Platform Used = " + str(platform))
    print("Recorded Lecture Link = ", end="")
    if not link == "":
        print(link)
    else:
        print("Not Available")
    # Fill the form; missing/NaN values become the literal "NA".
    if not isnan(duration):
        durationform.send_keys(duration)
    else:
        durationform.send_keys("NA")
    interraction.send_keys('NA')
    assignmentrec.send_keys('NA')
    assignmentsub.send_keys("NA")
    test.send_keys("NA")
    self.send_keys("NA")
    remark.send_keys("NA")
    if not isnan(topic):
        topicfld.send_keys(topic)
    else:
        topicfld.send_keys("NA")
    if not isnan(link):
        # WebDriverWait(browser, 20).until(
        #     EC.element_to_be_clickable((By.CLASS_NAME, "record_lecture_upload_link"))).click()
        leclink.send_keys(link)
    else:
        leclink.send_keys("NA")
    if not isnan(platform):
        platusd.send_keys(platform)
    else:
        platusd.send_keys("NA")
    # Wait for the user's go-ahead, then submit this entry.
    print("Press Enter for next entry")
    wait0 = input() # waiting for interrupt
    submit = browser.find_element_by_id('btnSubmit')
    submit.click()
| 35.301587 | 117 | 0.680081 |
e2640507f059cea54e6b96d6d00fe143752846e9 | 30,565 | py | Python | sdk/storage/azure-storage-blob/tests/test_blob_client_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1 | 2021-08-14T04:33:48.000Z | 2021-08-14T04:33:48.000Z | sdk/storage/azure-storage-blob/tests/test_blob_client_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2 | 2021-08-24T15:32:30.000Z | 2021-08-24T23:21:34.000Z | sdk/storage/azure-storage-blob/tests/test_blob_client_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1 | 2016-04-19T22:15:47.000Z | 2016-04-19T22:15:47.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import pytest
import platform
import asyncio
from azure.core.credentials import AzureSasCredential
from azure.storage.blob import VERSION
from azure.storage.blob.aio import (
BlobServiceClient,
ContainerClient,
BlobClient,
)
from _shared.testcase import GlobalStorageAccountPreparer
from devtools_testutils.storage.aio import AsyncStorageTestCase
# ------------------------------------------------------------------------------
# Client classes under test, mapped to the URL subdomain ("blob") they use;
# the tests below iterate this mapping to exercise every client uniformly.
SERVICES = {
    BlobServiceClient: 'blob',
    ContainerClient: 'blob',
    BlobClient: 'blob',
}
# Connection-string key naming the primary / secondary endpoint per service.
_CONNECTION_ENDPOINTS = {'blob': 'BlobEndpoint'}
_CONNECTION_ENDPOINTS_SECONDARY = {'blob': 'BlobSecondaryEndpoint'}
class StorageClientTestAsync(AsyncStorageTestCase):
def setUp(self):
super(StorageClientTestAsync, self).setUp()
self.sas_token = self.generate_sas_token()
self.token_credential = self.generate_oauth_token()
# --Helpers-----------------------------------------------------------------
def validate_standard_account_endpoints(self, service, url_type, account_name, account_key):
self.assertIsNotNone(service)
self.assertEqual(service.account_name, account_name)
self.assertEqual(service.credential.account_name, account_name)
self.assertEqual(service.credential.account_key, account_key)
self.assertTrue('{}.{}.core.windows.net'.format(account_name, url_type) in service.url)
self.assertTrue('{}-secondary.{}.core.windows.net'.format(account_name, url_type) in service.secondary_endpoint)
# --Direct Parameters Test Cases --------------------------------------------
@GlobalStorageAccountPreparer()
def test_create_service_with_key_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for client, url in SERVICES.items():
# Act
service = client(
self.account_url(storage_account, "blob"), credential=storage_account_key, container_name='foo', blob_name='bar')
# Assert
self.validate_standard_account_endpoints(service, url, storage_account.name, storage_account_key)
self.assertEqual(service.scheme, 'https')
@GlobalStorageAccountPreparer()
def test_create_service_with_connection_string_async(self, resource_group, location, storage_account, storage_account_key):
for service_type in SERVICES.items():
# Act
service = service_type[0].from_connection_string(
self.connection_string(storage_account, storage_account_key), container_name="test", blob_name="test")
# Assert
self.validate_standard_account_endpoints(service, service_type[1], storage_account.name, storage_account_key)
self.assertEqual(service.scheme, 'https')
@GlobalStorageAccountPreparer()
def test_create_service_with_sas_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES:
# Act
service = service_type(
self.account_url(storage_account, "blob"), credential=self.sas_token, container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertTrue(service.url.startswith('https://' + storage_account.name + '.blob.core.windows.net'))
self.assertTrue(service.url.endswith(self.sas_token))
self.assertIsNone(service.credential)
@GlobalStorageAccountPreparer()
def test_create_service_with_sas_credential_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
sas_credential = AzureSasCredential(self.sas_token)
for service_type in SERVICES:
# Act
service = service_type(
self.account_url(storage_account, "blob"), credential=sas_credential, container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertTrue(service.url.startswith('https://' + storage_account.name + '.blob.core.windows.net'))
self.assertFalse(service.url.endswith(self.sas_token))
self.assertEqual(service.credential, sas_credential)
@GlobalStorageAccountPreparer()
def test_create_service_with_sas_credential_url_raises_if_sas_is_in_uri_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
sas_credential = AzureSasCredential(self.sas_token)
for service_type in SERVICES:
# Act
with self.assertRaises(ValueError):
service = service_type(
self.account_url(storage_account, "blob") + "?sig=foo", credential=sas_credential, container_name='foo', blob_name='bar')
@GlobalStorageAccountPreparer()
def test_create_service_with_token_async(self, resource_group, location, storage_account, storage_account_key):
for service_type in SERVICES:
# Act
service = service_type(
self.account_url(storage_account, "blob"), credential=self.token_credential, container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertTrue(service.url.startswith('https://' + storage_account.name + '.blob.core.windows.net'))
self.assertEqual(service.credential, self.token_credential)
self.assertEqual(service.account_name, storage_account.name)
@GlobalStorageAccountPreparer()
def test_create_service_with_token_and_http_async(self, resource_group, location, storage_account, storage_account_key):
for service_type in SERVICES:
# Act
with self.assertRaises(ValueError):
url = self.account_url(storage_account, "blob").replace('https', 'http')
service_type(url, credential=self.token_credential, container_name='foo', blob_name='bar')
@GlobalStorageAccountPreparer()
def test_create_service_china_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
# Act
url = self.account_url(storage_account, "blob").replace('core.windows.net', 'core.chinacloudapi.cn')
service = service_type[0](
url, credential=storage_account_key, container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertEqual(service.credential.account_name, storage_account.name)
self.assertEqual(service.credential.account_key, storage_account_key)
self.assertTrue(service.primary_endpoint.startswith(
'https://{}.{}.core.chinacloudapi.cn'.format(storage_account.name, service_type[1])))
self.assertTrue(service.secondary_endpoint.startswith(
'https://{}-secondary.{}.core.chinacloudapi.cn'.format(storage_account.name, service_type[1])))
@GlobalStorageAccountPreparer()
def test_create_service_protocol_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
# Act
url = self.account_url(storage_account, "blob").replace('https', 'http')
service = service_type[0](
url, credential=storage_account_key, container_name='foo', blob_name='bar')
# Assert
self.validate_standard_account_endpoints(service, service_type[1], storage_account.name, storage_account_key)
self.assertEqual(service.scheme, 'http')
@GlobalStorageAccountPreparer()
def test_create_blob_service_anonymous_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
BLOB_SERVICES = [BlobServiceClient, ContainerClient, BlobClient]
for service_type in BLOB_SERVICES:
# Act
service = service_type(self.account_url(storage_account, "blob"), container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertTrue(service.url.startswith('https://' + storage_account.name + '.blob.core.windows.net'))
self.assertIsNone(service.credential)
self.assertEqual(service.account_name, storage_account.name)
@GlobalStorageAccountPreparer()
def test_create_blob_service_custom_domain_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
BLOB_SERVICES = [BlobServiceClient, ContainerClient, BlobClient]
for service_type in BLOB_SERVICES:
# Act
service = service_type(
'www.mydomain.com',
credential={'account_name': storage_account.name, 'account_key': storage_account_key},
container_name='foo',
blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertEqual(service.credential.account_name, storage_account.name)
self.assertEqual(service.credential.account_key, storage_account_key)
self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
self.assertTrue(service.secondary_endpoint.startswith('https://' + storage_account.name + '-secondary.blob.core.windows.net'))
@GlobalStorageAccountPreparer()
def test_create_service_with_socket_timeout_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
# Act
default_service = service_type[0](
self.account_url(storage_account, "blob"), credential=storage_account_key,
container_name='foo', blob_name='bar')
service = service_type[0](
self.account_url(storage_account, "blob"), credential=storage_account_key,
container_name='foo', blob_name='bar', connection_timeout=22)
# Assert
self.validate_standard_account_endpoints(service, service_type[1], storage_account.name, storage_account_key)
assert service._client._client._pipeline._transport.connection_config.timeout == 22
assert default_service._client._client._pipeline._transport.connection_config.timeout in [20, (20, 2000)]
# --Connection String Test Cases --------------------------------------------
@GlobalStorageAccountPreparer()
def test_create_service_with_connection_string_key_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
conn_string = 'AccountName={};AccountKey={};'.format(storage_account.name, storage_account_key)
for service_type in SERVICES.items():
# Act
service = service_type[0].from_connection_string(
conn_string, container_name='foo', blob_name='bar')
# Assert
self.validate_standard_account_endpoints(service, service_type[1], storage_account.name, storage_account_key)
self.assertEqual(service.scheme, 'https')
@GlobalStorageAccountPreparer()
def test_create_service_with_connection_string_sas_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
conn_string = 'AccountName={};SharedAccessSignature={};'.format(storage_account.name, self.sas_token)
for service_type in SERVICES:
# Act
service = service_type.from_connection_string(
conn_string, container_name='foo', blob_name='bar')
# Assert
self.assertIsNotNone(service)
self.assertTrue(service.url.startswith('https://' + storage_account.name + '.blob.core.windows.net'))
self.assertTrue(service.url.endswith(self.sas_token))
self.assertIsNone(service.credential)
self.assertEqual(service.account_name, storage_account.name)
@GlobalStorageAccountPreparer()
def test_create_blob_client_with_complete_blob_url_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
blob_url = self.account_url(storage_account, "blob") + "/foourl/barurl"
service = BlobClient(blob_url, credential=storage_account_key, container_name='foo', blob_name='bar')
# Assert
self.assertEqual(service.scheme, 'https')
self.assertEqual(service.container_name, 'foo')
self.assertEqual(service.blob_name, 'bar')
self.assertEqual(service.account_name, storage_account.name)
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_endpoint_protocol_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
conn_string = 'AccountName={};AccountKey={};DefaultEndpointsProtocol=http;EndpointSuffix=core.chinacloudapi.cn;'.format(
storage_account.name, storage_account_key)
for service_type in SERVICES.items():
# Act
service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertEqual(service.credential.account_name, storage_account.name)
self.assertEqual(service.credential.account_key, storage_account_key)
self.assertTrue(
service.primary_endpoint.startswith(
'http://{}.{}.core.chinacloudapi.cn/'.format(storage_account.name, service_type[1])))
self.assertTrue(
service.secondary_endpoint.startswith(
'http://{}-secondary.{}.core.chinacloudapi.cn'.format(storage_account.name, service_type[1])))
self.assertEqual(service.scheme, 'http')
@GlobalStorageAccountPreparer()
def test_create_service_with_connection_string_emulated_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
conn_string = 'UseDevelopmentStorage=true;'.format(storage_account.name, storage_account_key)
# Act
with self.assertRaises(ValueError):
service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")
@GlobalStorageAccountPreparer()
def test_create_service_with_connection_string_anonymous_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
conn_string = 'BlobEndpoint=www.mydomain.com;'
# Act
service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, None)
self.assertIsNone(service.credential)
self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
with self.assertRaises(ValueError):
service.secondary_endpoint
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_custm_domain_async(self, resource_group, location, storage_account, storage_account_key):
# Arrange
for service_type in SERVICES.items():
conn_string = 'AccountName={};AccountKey={};BlobEndpoint=www.mydomain.com;'.format(
storage_account.name, storage_account_key)
# Act
service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")
# Assert
self.assertIsNotNone(service)
self.assertEqual(service.account_name, storage_account.name)
self.assertEqual(service.credential.account_name, storage_account.name)
self.assertEqual(service.credential.account_key, storage_account_key)
self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
self.assertTrue(service.secondary_endpoint.startswith('https://' + storage_account.name + '-secondary.blob.core.windows.net'))
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_custm_dom_trailing_slash_async(self, resource_group, location, storage_account, storage_account_key):
    """A custom endpoint with a trailing slash parses the same as one without."""
    # Arrange
    for service_type in SERVICES.items():
        conn_string = 'AccountName={};AccountKey={};BlobEndpoint=www.mydomain.com/;'.format(
            storage_account.name, storage_account_key)

        # Act
        service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, storage_account.name)
        self.assertEqual(service.credential.account_name, storage_account.name)
        self.assertEqual(service.credential.account_key, storage_account_key)
        self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
        self.assertTrue(service.secondary_endpoint.startswith('https://' + storage_account.name + '-secondary.blob.core.windows.net'))
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_custm_dom_2ndry_override_async(self, resource_group, location, storage_account, storage_account_key):
    """The secondary_hostname keyword overrides the derived secondary endpoint."""
    # Arrange
    for service_type in SERVICES.items():
        conn_string = 'AccountName={};AccountKey={};BlobEndpoint=www.mydomain.com/;'.format(
            storage_account.name, storage_account_key)

        # Act
        service = service_type[0].from_connection_string(
            conn_string, secondary_hostname="www-sec.mydomain.com", container_name="foo", blob_name="bar")

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, storage_account.name)
        self.assertEqual(service.credential.account_name, storage_account.name)
        self.assertEqual(service.credential.account_key, storage_account_key)
        self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
        self.assertTrue(service.secondary_endpoint.startswith('https://www-sec.mydomain.com/'))
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_fail_if_2ndry_wo_primary_async(self, resource_group, location, storage_account, storage_account_key):
    """A connection string carrying only the secondary custom endpoint must be rejected."""
    for client_cls, url_type in SERVICES.items():
        # Arrange: include the service-specific secondary-endpoint key only.
        secondary_key = _CONNECTION_ENDPOINTS_SECONDARY.get(url_type)
        conn_string = 'AccountName={};AccountKey={};{}=www.mydomain.com;'.format(
            storage_account.name, storage_account_key, secondary_key)

        # Act / Assert: fails because the primary endpoint is missing.
        with self.assertRaises(ValueError):
            client_cls.from_connection_string(conn_string, container_name="foo", blob_name="bar")
@GlobalStorageAccountPreparer()
def test_creat_serv_w_connstr_pass_if_2ndry_w_primary_async(self, resource_group, location, storage_account, storage_account_key):
    """Custom secondary endpoint is accepted when a custom primary is also given."""
    for service_type in SERVICES.items():
        # Arrange: both primary and secondary endpoint keys are present.
        conn_string = 'AccountName={};AccountKey={};{}=www.mydomain.com;{}=www-sec.mydomain.com;'.format(
            storage_account.name,
            storage_account_key,
            _CONNECTION_ENDPOINTS.get(service_type[1]),
            _CONNECTION_ENDPOINTS_SECONDARY.get(service_type[1]))

        # Act
        service = service_type[0].from_connection_string(conn_string, container_name="foo", blob_name="bar")

        # Assert
        self.assertIsNotNone(service)
        self.assertEqual(service.account_name, storage_account.name)
        self.assertEqual(service.credential.account_name, storage_account.name)
        self.assertEqual(service.credential.account_key, storage_account_key)
        self.assertTrue(service.primary_endpoint.startswith('https://www.mydomain.com/'))
        self.assertTrue(service.secondary_endpoint.startswith('https://www-sec.mydomain.com/'))
def test_create_service_with_custom_account_endpoint_path(self):
    """Clients built against a custom endpoint with a path component keep that path in every URL."""
    account_name = "blobstorage"
    account_key = "blobkey"
    custom_account_url = "http://local-machine:11002/custom/account/path/" + self.sas_token
    for service_type in SERVICES.items():
        conn_string = 'DefaultEndpointsProtocol=http;AccountName={};AccountKey={};BlobEndpoint={};'.format(
            account_name, account_key, custom_account_url)

        # Act
        service = service_type[0].from_connection_string(
            conn_string, container_name="foo", blob_name="bar")

        # Assert: the path segment stays part of the primary hostname.
        self.assertEqual(service.account_name, account_name)
        self.assertEqual(service.credential.account_name, account_name)
        self.assertEqual(service.credential.account_key, account_key)
        self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')

    # Direct construction from the account URL (no credential) keeps the path too.
    service = BlobServiceClient(account_url=custom_account_url)
    self.assertEqual(service.account_name, None)
    self.assertEqual(service.credential, None)
    self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')
    self.assertTrue(service.url.startswith('http://local-machine:11002/custom/account/path/?'))

    service = ContainerClient(account_url=custom_account_url, container_name="foo")
    self.assertEqual(service.account_name, None)
    self.assertEqual(service.container_name, "foo")
    self.assertEqual(service.credential, None)
    self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')
    self.assertTrue(service.url.startswith('http://local-machine:11002/custom/account/path/foo?'))

    # Parsing back from a container URL strips the query string.
    service = ContainerClient.from_container_url("http://local-machine:11002/custom/account/path/foo?query=value")
    self.assertEqual(service.account_name, None)
    self.assertEqual(service.container_name, "foo")
    self.assertEqual(service.credential, None)
    self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')
    self.assertEqual(service.url, 'http://local-machine:11002/custom/account/path/foo')

    service = BlobClient(account_url=custom_account_url, container_name="foo", blob_name="bar", snapshot="baz")
    self.assertEqual(service.account_name, None)
    self.assertEqual(service.container_name, "foo")
    self.assertEqual(service.blob_name, "bar")
    self.assertEqual(service.snapshot, "baz")
    self.assertEqual(service.credential, None)
    self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')
    self.assertTrue(service.url.startswith('http://local-machine:11002/custom/account/path/foo/bar?snapshot=baz&'))

    # Parsing back from a blob URL keeps the snapshot but drops other query params.
    service = BlobClient.from_blob_url("http://local-machine:11002/custom/account/path/foo/bar?snapshot=baz&query=value")
    self.assertEqual(service.account_name, None)
    self.assertEqual(service.container_name, "foo")
    self.assertEqual(service.blob_name, "bar")
    self.assertEqual(service.snapshot, "baz")
    self.assertEqual(service.credential, None)
    self.assertEqual(service.primary_hostname, 'local-machine:11002/custom/account/path')
    self.assertEqual(service.url, 'http://local-machine:11002/custom/account/path/foo/bar?snapshot=baz')
def test_create_blob_client_with_sub_directory_path_in_blob_name(self):
    """Blob names that contain '/' sub-directories parse correctly from a blob URL."""
    blob_url = "https://testaccount.blob.core.windows.net/containername/dir1/sub000/2010_Unit150_Ivan097_img0003.jpg"
    blob_client = BlobClient.from_blob_url(blob_url)
    self.assertEqual(blob_client.container_name, "containername")
    self.assertEqual(blob_client.blob_name, "dir1/sub000/2010_Unit150_Ivan097_img0003.jpg")

    # Emulator-style URL: the account name is the first path segment, not the host.
    blob_emulator_url = 'http://127.0.0.1:1000/devstoreaccount1/containername/dir1/sub000/2010_Unit150_Ivan097_img0003.jpg'
    blob_client = BlobClient.from_blob_url(blob_emulator_url)
    self.assertEqual(blob_client.container_name, "containername")
    self.assertEqual(blob_client.blob_name, "dir1/sub000/2010_Unit150_Ivan097_img0003.jpg")
    self.assertEqual(blob_client.url, blob_emulator_url)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_request_callback_signed_header_async(self, resource_group, location, storage_account, storage_account_key):
    """Headers injected by a raw_request_hook are signed and sent with the request."""
    # Arrange
    service = BlobServiceClient(self.account_url(storage_account, "blob"), credential=storage_account_key)
    name = self.get_resource_name('cont')

    # Act: add a metadata header to PUT requests only.
    def callback(request):
        if request.http_request.method == 'PUT':
            request.http_request.headers['x-ms-meta-hello'] = 'world'

    # Assert: the injected metadata round-trips through the service.
    try:
        container = await service.create_container(name, raw_request_hook=callback)
        metadata = (await container.get_container_properties()).metadata
        self.assertEqual(metadata, {'hello': 'world'})
    finally:
        # Always clean up the container created above.
        await service.delete_container(name)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_response_callback_async(self, resource_group, location, storage_account, storage_account_key):
    """A raw_response_hook can rewrite the response before the client interprets it."""
    # Arrange
    service = BlobServiceClient(self.account_url(storage_account, "blob"), credential=storage_account_key)
    name = self.get_resource_name('cont')
    container = service.get_container_client(name)

    # Act: force a 200 so the (non-existent) container appears to exist.
    def callback(response):
        response.http_response.status_code = 200
        response.http_response.headers = {}

    # Assert
    exists = await container.get_container_properties(raw_response_hook=callback)
    self.assertTrue(exists)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_user_agent_default_async(self, resource_group, location, storage_account, storage_account_key):
    """The default User-Agent header carries the SDK package name and version."""
    service = BlobServiceClient(self.account_url(storage_account, "blob"), credential=storage_account_key)

    def callback(response):
        self.assertTrue('User-Agent' in response.http_request.headers)
        assert "azsdk-python-storage-blob/{}".format(VERSION) in response.http_request.headers['User-Agent']

    await service.get_service_properties(raw_response_hook=callback)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_user_agent_custom_async(self, resource_group, location, storage_account, storage_account_key):
    """A client-level custom user agent is prepended; a per-call value stacks in front of it."""
    custom_app = "TestApp/v1.0"
    service = BlobServiceClient(
        self.account_url(storage_account, "blob"), credential=storage_account_key, user_agent=custom_app)

    def callback(response):
        self.assertTrue('User-Agent' in response.http_request.headers)
        assert ("TestApp/v1.0 azsdk-python-storage-blob/{} Python/{} ({})".format(
            VERSION,
            platform.python_version(),
            platform.platform())) in response.http_request.headers['User-Agent']

    await service.get_service_properties(raw_response_hook=callback)

    # Per-request override: the call-level value precedes the client-level one.
    def callback(response):
        self.assertTrue('User-Agent' in response.http_request.headers)
        assert ("TestApp/v2.0 TestApp/v1.0 azsdk-python-storage-blob/{} Python/{} ({})".format(
            VERSION,
            platform.python_version(),
            platform.platform())) in response.http_request.headers['User-Agent']

    await service.get_service_properties(raw_response_hook=callback, user_agent="TestApp/v2.0")
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_user_agent_append_async(self, resource_group, location, storage_account, storage_account_key):
    """A per-call user_agent string is prepended to the default SDK user agent."""
    service = BlobServiceClient(self.account_url(storage_account, "blob"), credential=storage_account_key)

    def callback(response):
        self.assertTrue('User-Agent' in response.http_request.headers)
        assert ("customer_user_agent azsdk-python-storage-blob/{} Python/{} ({})".format(
            VERSION,
            platform.python_version(),
            platform.platform())) in response.http_request.headers['User-Agent']

    await service.get_service_properties(raw_response_hook=callback, user_agent='customer_user_agent')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_closing_pipeline_client(self, resource_group, location, storage_account, storage_account_key):
    """Every client type supports async context management and an explicit close()."""
    # Arrange
    for client, url in SERVICES.items():
        # Act
        service = client(
            self.account_url(storage_account, "blob"), credential=storage_account_key, container_name='foo', blob_name='bar')

        # Assert: close() is available and callable even inside the context manager.
        async with service:
            assert hasattr(service, 'close')
            await service.close()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_closing_pipeline_client_simple(self, resource_group, location, storage_account, storage_account_key):
    """Every client type can be closed directly, without a context manager."""
    # Arrange
    for client, url in SERVICES.items():
        # Act
        service = client(
            self.account_url(storage_account, "blob"), credential=storage_account_key, container_name='foo', blob_name='bar')
        await service.close()
# ------------------------------------------------------------------------------
| 51.543002 | 152 | 0.686995 |
f8ab051cab50552100db92944c0acf147f588ac1 | 9,948 | py | Python | docs/samples/specification/multiapi/generated/azure/multiapi/sample/_operations_mixin.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | docs/samples/specification/multiapi/generated/azure/multiapi/sample/_operations_mixin.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | docs/samples/specification/multiapi/generated/azure/multiapi/sample/_operations_mixin.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Iterable, Optional
from azure.core.paging import ItemPaged
from azure.core.polling import LROPoller
class MultiapiServiceClientOperationsMixin(object):
    """Version-dispatching operations mixin.

    Each public method resolves the API version negotiated for that
    operation, imports the matching generated operations class, wires it to
    this client's pipeline, and forwards the call.
    """

    def _build_operation_mixin(self, operation_class, api_version):
        """Instantiate *operation_class* sharing this client's pipeline/config.

        Factored out of every dispatch method below, which previously
        duplicated this wiring block verbatim.
        """
        mixin_instance = operation_class()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance

    def begin_test_lro(
        self,
        product=None,  # type: Optional["_models.Product"]
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Product"]
        """Put in whatever shape of Product you want, will return a Product with id equal to 100.

        :param product: Product to put.
        :type product: ~azure.multiapi.sample.v1.models.Product
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Product or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.multiapi.sample.v1.models.Product]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('begin_test_lro')
        if api_version == '1.0.0':
            from .v1.operations import MultiapiServiceClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'begin_test_lro'".format(api_version))
        mixin_instance = self._build_operation_mixin(OperationClass, api_version)
        return mixin_instance.begin_test_lro(product, **kwargs)

    def begin_test_lro_and_paging(
        self,
        client_request_id=None,  # type: Optional[str]
        test_lro_and_paging_options=None,  # type: Optional["_models.TestLroAndPagingOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[ItemPaged["_models.PagingResult"]]
        """A long-running paging operation that includes a nextLink that has 10 pages.

        :param client_request_id:
        :type client_request_id: str
        :param test_lro_and_paging_options: Parameter group.
        :type test_lro_and_paging_options: ~azure.multiapi.sample.v1.models.TestLroAndPagingOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns an iterator like instance of either PagingResult
         or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[~azure.multiapi.sample.v1.models.PagingResult]]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('begin_test_lro_and_paging')
        if api_version == '1.0.0':
            from .v1.operations import MultiapiServiceClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'begin_test_lro_and_paging'".format(api_version))
        mixin_instance = self._build_operation_mixin(OperationClass, api_version)
        return mixin_instance.begin_test_lro_and_paging(client_request_id, test_lro_and_paging_options, **kwargs)

    def test_different_calls(
        self,
        greeting_in_english,  # type: str
        greeting_in_chinese=None,  # type: Optional[str]
        greeting_in_french=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Has added parameters across the API versions.

        :param greeting_in_english: pass in 'hello' to pass test.
        :type greeting_in_english: str
        :param greeting_in_chinese: pass in 'nihao' to pass test.
        :type greeting_in_chinese: str
        :param greeting_in_french: pass in 'bonjour' to pass test.
        :type greeting_in_french: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('test_different_calls')
        if api_version == '1.0.0':
            from .v1.operations import MultiapiServiceClientOperationsMixin as OperationClass
        elif api_version == '2.0.0':
            from .v2.operations import MultiapiServiceClientOperationsMixin as OperationClass
        elif api_version == '3.0.0':
            from .v3.operations import MultiapiServiceClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'test_different_calls'".format(api_version))
        mixin_instance = self._build_operation_mixin(OperationClass, api_version)
        return mixin_instance.test_different_calls(greeting_in_english, greeting_in_chinese, greeting_in_french, **kwargs)

    def test_one(
        self,
        id,  # type: int
        message=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """TestOne should be in an FirstVersionOperationsMixin.

        :param id: An int parameter.
        :type id: int
        :param message: An optional string parameter.
        :type message: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('test_one')
        if api_version == '1.0.0':
            from .v1.operations import MultiapiServiceClientOperationsMixin as OperationClass
        elif api_version == '2.0.0':
            from .v2.operations import MultiapiServiceClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'test_one'".format(api_version))
        mixin_instance = self._build_operation_mixin(OperationClass, api_version)
        return mixin_instance.test_one(id, message, **kwargs)

    def test_paging(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.PagingResult"]
        """Returns ModelThree with optionalProperty 'paged'.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PagingResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.multiapi.sample.v3.models.PagingResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = self._get_api_version('test_paging')
        if api_version == '3.0.0':
            from .v3.operations import MultiapiServiceClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'test_paging'".format(api_version))
        mixin_instance = self._build_operation_mixin(OperationClass, api_version)
        return mixin_instance.test_paging(**kwargs)
| 51.278351 | 122 | 0.687073 |
f796c97fcca65f3daa866dc57b27f44fdeba4191 | 948 | py | Python | output/models/sun_data/stype/st_facets/st_facets00401m/st_facets00401m7_xsd/st_facets00401m7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/sun_data/stype/st_facets/st_facets00401m/st_facets00401m7_xsd/st_facets00401m7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/sun_data/stype/st_facets/st_facets00401m/st_facets00401m7_xsd/st_facets00401m7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import List
__NAMESPACE__ = "SType/ST_facets"
class S(Enum):
    """Enumeration of the XSD simple-type facet values.

    Generated from the ST_facets schema; each value is a Bengali/Gurmukhi
    character sequence followed by an ASCII suffix. Values are fixed by the
    schema and must not be altered.
    """
    VALUE_00 = "এ00"
    VALUE_0 = "এ-0"
    A0 = "ঐa0"
    VALUE_01 = "ও01"
    VALUE_1 = "ঝ-1"
    A1 = "নa1"
    VALUE_02 = "প02"
    VALUE_2 = "ভ-2"
    A2 = "রa2"
    VALUE_03 = "ল03"
    VALUE_04 = "শ04"
    VALUE_4 = "ষ-4"
    A4 = "হa4"
    VALUE_05 = "ড়05"
    VALUE_5 = "ড়-5"
    A5 = "ঢ়a5"
    VALUE_06 = "য়06"
    VALUE_6 = "ৠ-6"
    A6 = "ৡa6"
    VALUE_07 = "ৰ07"
    VALUE_7 = "ৰ-7"
    A7 = "ৱa7"
    VALUE_08 = "ਅ08"
    VALUE_8 = "ਇ-8"
    A8 = "ਊa8"
    VALUE_09 = "ਏ09"
    VALUE_9 = "ਏ-9"
    A9 = "ਐa9"
@dataclass
class Root:
    """Root XML element binding for the ST_facets namespace."""

    class Meta:
        # XML element name and namespace used by the xsdata (de)serializer.
        name = "root"
        namespace = "SType/ST_facets"

    # Sequence of enumerated values; the schema requires at least one.
    value: List[S] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
        }
    )
b76b06cc014577545e6e3539c34ca3debbfbb1f7 | 4,585 | py | Python | src/pathfinder.py | mikeboy91/poe-currency-flip-planner | 130aa7123a390b2f658ebbb46696ec8f9ad37802 | [
"MIT"
] | null | null | null | src/pathfinder.py | mikeboy91/poe-currency-flip-planner | 130aa7123a390b2f658ebbb46696ec8f9ad37802 | [
"MIT"
] | null | null | null | src/pathfinder.py | mikeboy91/poe-currency-flip-planner | 130aa7123a390b2f658ebbb46696ec8f9ad37802 | [
"MIT"
] | null | null | null | import logging
import time
from datetime import datetime
from typing import Dict, List
import jsonpickle
from src.config.user_config import UserConfig
from src.core import graph
from src.core.backends.backend_pool import BackendPool
from src.core.offer import Offer
from src.trading import ItemList
def format_conversions(conversions) -> str:
    """Join the one-line summaries of all conversions with newlines."""
    return "\n".join(format_conversion(conversion) for conversion in conversions)
def format_conversion(conversion) -> str:
    """Render a single conversion dict as a one-line summary.

    Expects keys ``from``, ``to``, ``winnings`` and ``transactions``.
    """
    transaction_count = len(conversion["transactions"])
    return (
        f"{conversion['from']} -> {conversion['to']} -- "
        f"{conversion['winnings']} ({transaction_count} transactions)"
    )
class PathFinder:
    """
    A simple class to abstract away the internal library functions for fetching
    offers, constructing a graph and finding profitable paths along that graph.
    """
    def __init__(
        self,
        league,
        item_pairs,
        user_config: UserConfig,
        excluded_traders=None,
    ):
        """
        :param league: league name that offers are fetched for.
        :param item_pairs: iterable of (have, want) item pairs to query.
        :param user_config: trading configuration used to evaluate paths.
        :param excluded_traders: optional list of trader IGNs to ignore.
        """
        self.league = league
        self.item_pairs = item_pairs
        self.user_config = user_config
        # Previously a mutable default argument ([]); use None and create a
        # fresh list per instance to avoid cross-instance sharing.
        self.excluded_traders = excluded_traders if excluded_traders is not None else []

        # Internal fields to store partial results
        self.offers: List = []
        self.graph: Dict = {}
        self.results: Dict = {}
        # Timestamp without the microsecond fraction.
        self.timestamp = str(datetime.now()).split(".")[0]

        # Internal fields
        self.item_list = ItemList.load_from_file()
        self.logging = True
        self.backend_pool = BackendPool(self.item_list)

    def prepickle(self) -> str:
        """Serialize the current state to a JSON string (for export/caching)."""
        return jsonpickle.encode(
            {
                "timestamp": self.timestamp,
                "league": self.league,
                "item_pairs": self.item_pairs,
                "offers": self.offers,
                "graph": self.graph,
                "results": self.results,
            },
            unpicklable=False,
            indent=2)

    async def _filter_traders(self, offers: List[Offer], excluded_traders) -> List:
        """Drop offers whose contact IGN is in *excluded_traders* (case-insensitive)."""
        excluded = {name.lower() for name in excluded_traders}
        return [offer for offer in offers if offer.contact_ign.lower() not in excluded]

    async def _fetch(self):
        """Fetch all offers for the configured item pairs from the backend pool."""
        t_start = time.time()
        logging.info("Fetching {} offers for {} pairs".format(
            self.league, len(self.item_pairs)))

        self.offers = await self.backend_pool.schedule(self.league, self.item_pairs, self.item_list)

        # Filter out unwanted traders
        self.offers = await self._filter_traders(self.offers, self.excluded_traders)

        t_end = time.time()
        logging.info("Spent {}s fetching offers".format(
            round(t_end - t_start, 2)))

    async def _build_graph(self):
        """Build the currency-exchange graph from the fetched offers."""
        t_start = time.time()
        self.graph = graph.build_graph(self.offers)
        t_end = time.time()
        logging.info("Spent {}s building the graph".format(
            round(t_end - t_start, 2)))

    def _find_profitable_paths(self, max_transaction_length):
        """Populate self.results with profitable cycles per starting currency."""
        logging.info("Checking for profitable conversions...")
        t_start = time.time()

        for c in self.graph.keys():
            # For currency @c, find all paths within the constructed path that are
            # at most @max_transaction_length long
            paths = graph.find_paths(self.graph, c, c, self.user_config,
                                     max_transaction_length)
            profitable_conversions = []

            for p in paths:
                conversion = graph.build_conversion(p, self.user_config)
                if conversion is not None and conversion["winnings"] > 0:
                    profitable_conversions.append(conversion)

            if self.logging:
                n_profitable = len(profitable_conversions)
                if n_profitable > 0:
                    logging.info("Checking {} -> {} Conversions".format(
                        c, n_profitable))

            # Best (highest-winning) conversions first.
            profitable_conversions = sorted(profitable_conversions,
                                            key=lambda k: k["winnings"],
                                            reverse=True)
            self.results[c] = profitable_conversions

        t_end = time.time()
        if self.logging:
            logging.info("Spent {}s finding paths".format(
                round(t_end - t_start, 2)))

    async def run(self, max_transaction_length=2):
        """Fetch offers, build the graph, then search for profitable paths."""
        await self._fetch()
        await self._build_graph()
        self._find_profitable_paths(max_transaction_length)
| 33.713235 | 100 | 0.598037 |
e091312bf6e9475368fe7732765c9335b250d7c2 | 398 | py | Python | guidance_and_support/views.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 4 | 2019-03-28T06:42:17.000Z | 2021-06-06T13:10:51.000Z | guidance_and_support/views.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 177 | 2018-09-28T14:21:56.000Z | 2022-03-30T21:45:26.000Z | guidance_and_support/views.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 8 | 2018-10-25T20:43:10.000Z | 2022-03-17T14:19:27.000Z | """View definitions for the guidance_and_support app."""
from django.shortcuts import render
def guidance_and_support(request):
    """Render the guidance and support page."""
    template = "guidance_and_support/guidance_and_support.html"
    context = {}
    return render(request, template, context)
def community(request):
    """Render the community page."""
    template = "guidance_and_support/community_page.html"
    context = {}
    return render(request, template, context)
| 28.428571 | 80 | 0.746231 |
2bb55992ed6b8fcd4497d7c9d45d6e1a6b19d46a | 5,692 | py | Python | fcos_core/modeling/roi_heads/box_head/roi_box_feature_extractors.py | amsword/FCOS | 310867ad36714d55f3cc4ee592eab046e8a515c2 | [
"BSD-2-Clause"
] | null | null | null | fcos_core/modeling/roi_heads/box_head/roi_box_feature_extractors.py | amsword/FCOS | 310867ad36714d55f3cc4ee592eab046e8a515c2 | [
"BSD-2-Clause"
] | null | null | null | fcos_core/modeling/roi_heads/box_head/roi_box_feature_extractors.py | amsword/FCOS | 310867ad36714d55f3cc4ee592eab046e8a515c2 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from fcos_core.modeling import registry
from fcos_core.modeling.backbone import resnet
from fcos_core.modeling.poolers import Pooler
from fcos_core.modeling.make_layers import group_norm
from fcos_core.modeling.make_layers import make_fc
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("ResNet50Conv5ROIFeatureExtractor")
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
    """Box-head feature extractor: ROI-pools region features, then runs them
    through a ResNet stage-4 (conv5) head."""

    def __init__(self, config, in_channels):
        super(ResNet50Conv5ROIFeatureExtractor, self).__init__()

        # Pooler maps each proposal onto a fixed resolution x resolution grid.
        resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        use_torchvision = config.MODEL.ROI_BOX_HEAD.USE_TORCHVISION
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
            use_torchvision=use_torchvision,
        )

        # Stage 4 (conv5) of a ResNet, applied per-ROI as the box head.
        stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
        head = resnet.ResNetHead(
            block_module=config.MODEL.RESNETS.TRANS_FUNC,
            stages=(stage,),
            num_groups=config.MODEL.RESNETS.NUM_GROUPS,
            width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
            stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
            stride_init=None,
            res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
            dilation=config.MODEL.RESNETS.RES5_DILATION
        )

        self.pooler = pooler
        self.head = head
        self.out_channels = head.out_channels

    def forward(self, x, proposals):
        # x: backbone feature maps; proposals: per-image region proposals.
        x = self.pooler(x, proposals)
        x = self.head(x)
        return x
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FPN2MLPFeatureExtractor")
class FPN2MLPFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification
    """

    def __init__(self, cfg, in_channels):
        super(FPN2MLPFeatureExtractor, self).__init__()

        # ROI pooler producing a fixed spatial grid per proposal.
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        use_torchvision = cfg.MODEL.ROI_BOX_HEAD.USE_TORCHVISION
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
            use_torchvision=use_torchvision,
        )
        # Two fully-connected layers (optionally with GroupNorm) on the
        # flattened pooled features.
        input_size = in_channels * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        self.pooler = pooler
        self.fc6 = make_fc(input_size, representation_size, use_gn)
        self.fc7 = make_fc(representation_size, representation_size, use_gn)
        self.out_channels = representation_size

    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        # Flatten per-ROI feature maps before the MLP.
        x = x.view(x.size(0), -1)

        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))

        return x
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FPNXconv1fcFeatureExtractor")
class FPNXconv1fcFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification
    """

    def __init__(self, cfg, in_channels):
        super(FPNXconv1fcFeatureExtractor, self).__init__()

        # ROI pooler producing a fixed spatial grid per proposal.
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        use_torchvision = cfg.MODEL.ROI_BOX_HEAD.USE_TORCHVISION
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
            use_torchvision=use_torchvision,
        )
        self.pooler = pooler

        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        conv_head_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM
        num_stacked_convs = cfg.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS
        dilation = cfg.MODEL.ROI_BOX_HEAD.DILATION

        # Stack of 3x3 (dilated) convs, each followed by optional GroupNorm
        # and a ReLU.
        xconvs = []
        for ix in range(num_stacked_convs):
            xconvs.append(
                nn.Conv2d(
                    in_channels,
                    conv_head_dim,
                    kernel_size=3,
                    stride=1,
                    padding=dilation,
                    dilation=dilation,
                    bias=False if use_gn else True
                )
            )
            in_channels = conv_head_dim
            if use_gn:
                xconvs.append(group_norm(in_channels))
            xconvs.append(nn.ReLU(inplace=True))

        self.add_module("xconvs", nn.Sequential(*xconvs))
        # Initialize conv weights with a small gaussian; biases (only present
        # when GN is off) are zeroed.
        for modules in [self.xconvs,]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    if not use_gn:
                        torch.nn.init.constant_(l.bias, 0)

        # Single FC layer projecting the flattened conv features.
        input_size = conv_head_dim * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        self.fc6 = make_fc(input_size, representation_size, use_gn=False)
        self.out_channels = representation_size

    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        x = self.xconvs(x)
        # Flatten per-ROI feature maps before the FC layer.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc6(x))
        return x
def make_roi_box_feature_extractor(cfg, in_channels):
    """Instantiate the ROI box feature extractor selected in the config registry."""
    extractor_name = cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR
    extractor_cls = registry.ROI_BOX_FEATURE_EXTRACTORS[extractor_name]
    return extractor_cls(cfg, in_channels)
| 36.025316 | 81 | 0.653197 |
f79325eca2efa0587d5978375557c1242b881222 | 1,066 | py | Python | setup.py | serverlessplus/py | 17acf7af8e6484a2c9844ca9fd28ae54e52ee1b5 | [
"Apache-2.0"
] | 10 | 2019-04-16T09:57:57.000Z | 2021-08-01T15:33:28.000Z | setup.py | serverlessplus/py | 17acf7af8e6484a2c9844ca9fd28ae54e52ee1b5 | [
"Apache-2.0"
] | null | null | null | setup.py | serverlessplus/py | 17acf7af8e6484a2c9844ca9fd28ae54e52ee1b5 | [
"Apache-2.0"
] | 4 | 2019-12-03T01:44:27.000Z | 2021-08-08T08:33:30.000Z | from setuptools import setup
# Use the project README as the long description rendered on PyPI.
with open('README.md', 'r') as file:
    long_description = file.read()
# Package metadata for setuptools / PyPI.
setup(
    name='serverlessplus',
    packages=['serverlessplus'],
    version='0.0.8',
    license='Apache-2.0',
    author='chenhengqi',
    author_email='ritchiechen@tencent.com',
    description='serverless your django/flask apps',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/serverlessplus/py',
    # werkzeug provides the WSGI plumbing the adapter is built on.
    install_requires=['werkzeug'],
    keywords=['serverless', 'scf', 'tencent-cloud', 'wsgi', 'django', 'flask'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| 34.387097 | 79 | 0.634146 |
11b8e48631ebdd6827b1f64e14411f9a63f5186f | 2,645 | py | Python | rhasspytts_wavenet_hermes/__main__.py | digitalfiz/rhasspy-tts-wavenet-hermes | 7744e9b266a9a25b70197970cb104751acea58ff | [
"MIT"
] | null | null | null | rhasspytts_wavenet_hermes/__main__.py | digitalfiz/rhasspy-tts-wavenet-hermes | 7744e9b266a9a25b70197970cb104751acea58ff | [
"MIT"
] | null | null | null | rhasspytts_wavenet_hermes/__main__.py | digitalfiz/rhasspy-tts-wavenet-hermes | 7744e9b266a9a25b70197970cb104751acea58ff | [
"MIT"
] | null | null | null | """Hermes MQTT service for Rhasspy TTS with Google Wavenet."""
import argparse
import asyncio
import logging
from pathlib import Path
import paho.mqtt.client as mqtt
import rhasspyhermes.cli as hermes_cli
from . import TtsHermesMqtt
_LOGGER = logging.getLogger("rhasspytts_wavenet_hermes")
# -----------------------------------------------------------------------------
def main():
    """Parse CLI options, connect to the MQTT broker and serve TTS requests."""
    arg_parser = argparse.ArgumentParser(prog="rhasspy-tts-wavenet-hermes")

    # (flag, add_argument keyword options) for every service-specific option.
    option_specs = [
        (
            "--credentials-json",
            dict(required=True, help="Path to Google Wavenet credentials JSON file"),
        ),
        ("--cache-dir", dict(required=True, help="Directory to cache WAV files")),
        ("--voice", dict(default="Wavenet-C", help="Chosen voice (default: Wavenet-C)")),
        ("--gender", dict(default="FEMALE", help="Chosen gender (default: FEMALE)")),
        (
            "--sample-rate",
            dict(
                default=22050,
                type=int,
                help="Chosen sample rate of the outpt wave sample (default: 22050)",
            ),
        ),
        ("--language-code", dict(default="en-US", help="Chosen language (default: en-US)")),
        (
            "--url",
            dict(
                default="https://texttospeech.googleapis.com/v1/text:synthesize",
                help="Synthesize URL (default: v1)",
            ),
        ),
        (
            "--play-command",
            dict(help="Command to play WAV data from stdin (default: publish playBytes)"),
        ),
    ]
    for flag, options in option_specs:
        arg_parser.add_argument(flag, **options)

    # Shared Hermes/MQTT options (host, port, site-id, ...).
    hermes_cli.add_hermes_args(arg_parser)
    cli_args = arg_parser.parse_args()

    hermes_cli.setup_logging(cli_args)
    _LOGGER.debug(cli_args)

    # Normalize filesystem arguments to Path objects.
    cli_args.credentials_json = Path(cli_args.credentials_json)
    cli_args.cache_dir = Path(cli_args.cache_dir)

    # Listen for messages
    mqtt_client = mqtt.Client()
    tts_service = TtsHermesMqtt(
        mqtt_client,
        credentials_json=cli_args.credentials_json,
        cache_dir=cli_args.cache_dir,
        voice=cli_args.voice,
        gender=cli_args.gender,
        sample_rate=cli_args.sample_rate,
        language_code=cli_args.language_code,
        url=cli_args.url,
        play_command=cli_args.play_command,
        site_ids=cli_args.site_id,
    )

    _LOGGER.debug("Connecting to %s:%s", cli_args.host, cli_args.port)
    hermes_cli.connect(mqtt_client, cli_args)
    mqtt_client.loop_start()

    try:
        # Run event loop until interrupted.
        asyncio.run(tts_service.handle_messages_async())
    except KeyboardInterrupt:
        pass
    finally:
        _LOGGER.debug("Shutting down")
        mqtt_client.loop_stop()
# -----------------------------------------------------------------------------
# Allow running the service directly as a script.
if __name__ == "__main__":
    main()
| 27.842105 | 83 | 0.609074 |
3325c34eed3f38633028b097c43646c5615e6ce8 | 4,034 | py | Python | benchmark/startQiskit_noisy2314.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2314.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2314.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=33
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Element-wise XOR of two bit strings, returned in reversed bit order."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, as a one-character string."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f using a multi-controlled Toffoli per accepted pattern.

    For every n-bit input with f(input) == "1", qubits whose bit is 0 are
    X-flipped so the MCT fires exactly on that pattern, then flipped back.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        pattern = np.binary_repr(i, n)
        if f(pattern) != "1":
            continue
        zero_positions = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit around the oracle for f,
    then measure every qubit into its classical register.

    The trailing `# number=...` comments are generator bookkeeping; gate
    order is significant and must not be rearranged.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=30
    prog.cz(input_qubit[0],input_qubit[3]) # number=31
    prog.h(input_qubit[3]) # number=32
    prog.x(input_qubit[3]) # number=28
    prog.cx(input_qubit[0],input_qubit[3]) # number=29
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on the first n-1 qubits with the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.cx(input_qubit[1],input_qubit[0]) # number=13
    prog.h(input_qubit[0]) # number=15
    prog.cz(input_qubit[1],input_qubit[0]) # number=16
    prog.h(input_qubit[1]) # number=20
    prog.h(input_qubit[2]) # number=19
    prog.cx(input_qubit[3],input_qubit[0]) # number=24
    prog.z(input_qubit[3]) # number=25
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.h(input_qubit[0]) # number=17
    prog.cx(input_qubit[2],input_qubit[0]) # number=21
    prog.x(input_qubit[1]) # number=23
    prog.cx(input_qubit[2],input_qubit[0]) # number=22
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit string and bias defining the oracle function f.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Execute on the simulated noisy FakeVigo backend.
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump measurement counts and the transpiled circuit for later analysis.
    writefile = open("../data/startQiskit_noisy2314.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
d43535cf2ff4f6c984969cd50485decb32188fc8 | 607 | py | Python | tests/float/float_format.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 198 | 2017-03-24T23:23:54.000Z | 2022-01-07T07:14:00.000Z | tests/float/float_format.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 509 | 2017-03-28T19:37:18.000Z | 2022-03-31T20:31:43.000Z | tests/float/float_format.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 187 | 2017-03-24T23:23:58.000Z | 2022-02-25T01:48:45.000Z | # test float formatting
# general rounding
for value in (116, 1111, 1234, 5010, 11111):
    for spec in ('%.0f', '%.1f', '%.3f'):
        print(spec % value)

# make sure rounding is done at the correct precision
for precision in range(8):
    print(('%%.%df' % precision) % 6e-5)

# check certain cases that had a digit value of 10 render as a ":" character
print('%.2e' % float('9' * 51 + 'e-39'))
print('%.2e' % float('9' * 40 + 'e-21'))

# check a case that would render negative digit values, eg ")" characters
# the string is converted back to a float to check for no illegal characters
float('%.23e' % 1e-80)
| 30.35 | 76 | 0.635914 |
c14c8461772ff65036188ac2237a0299aa57965e | 276 | py | Python | test/test_toolbar.py | defuz/flask-debugtoolbar | b08fe477b6b796c808c0b3bfcaafa893974c676d | [
"BSD-3-Clause"
] | null | null | null | test/test_toolbar.py | defuz/flask-debugtoolbar | b08fe477b6b796c808c0b3bfcaafa893974c676d | [
"BSD-3-Clause"
] | null | null | null | test/test_toolbar.py | defuz/flask-debugtoolbar | b08fe477b6b796c808c0b3bfcaafa893974c676d | [
"BSD-3-Clause"
def load_app(name):
    """Import the named example app module and return a Flask test client for it."""
    application = __import__(name).app
    application.config['TESTING'] = True
    return application.test_client()
def test_basic_app():
    """The index page renders and includes the debug toolbar container."""
    client = load_app('basic_app')
    response = client.get('/')
    assert response.status_code == 200
    assert b'<div id="flDebug"' in response.data
| 23 | 45 | 0.644928 |
38986f7a3db60c98dd8f31a54929e2879862727f | 17,882 | py | Python | pyenzyme/restful/template.py | HannahDi/PyEnzyme | afbd03ab65445b2411307613c9a3cd7ab8ea4ae7 | [
"BSD-2-Clause"
] | 7 | 2020-10-28T22:04:56.000Z | 2022-03-01T12:23:57.000Z | pyenzyme/restful/template.py | HannahDi/PyEnzyme | afbd03ab65445b2411307613c9a3cd7ab8ea4ae7 | [
"BSD-2-Clause"
] | 33 | 2021-05-18T15:17:05.000Z | 2022-03-29T13:59:57.000Z | pyenzyme/restful/template.py | HannahDi/PyEnzyme | afbd03ab65445b2411307613c9a3cd7ab8ea4ae7 | [
"BSD-2-Clause"
] | 8 | 2020-10-27T13:31:06.000Z | 2022-03-20T10:40:56.000Z | '''
File: template.py
Project: restful
Author: Jan Range
License: BSD-2 clause
-----
Last Modified: Wednesday June 23rd 2021 9:57:02 pm
Modified By: Jan Range (<jan.range@simtech.uni-stuttgart.de>)
-----
Copyright (c) 2021 Institute of Biochemistry and Technical Biochemistry Stuttgart
'''
from flask import request, send_file, jsonify
from flask_apispec import doc, marshal_with, MethodResource
import os
import shutil
import io
from pyenzyme.enzymeml.tools import UnitCreator
from pyenzyme.enzymeml.core import \
Replicate, EnzymeMLDocument, EnzymeReaction,\
Vessel, Protein, Reactant, Creator
from pyenzyme.restful.template_schema import TemplateSchema
from builtins import enumerate
import tempfile
import numpy as np
import pandas as pd
# Human-readable endpoint description shown in the generated API docs
# (attached via the @doc decorator on convertTemplate.post below).
desc = 'This endpoint is used to convert an EnzymeML-Template spreadsheet to \
an EnzymeML OMEX container.\
Upload your XLSM file using form-data with the "xlsm" tag. \
The endpoint will return the converted template as an OMEX file.'
class convertTemplate(MethodResource):
    """REST resource converting an EnzymeML spreadsheet template to an OMEX archive.

    POST an .xlsm/.xlsx file under the form-data key "xlsm"; the response is
    the converted EnzymeML OMEX container as a file download.
    """
    @doc(tags=['Convert EnzymeML-Template'], description=desc)
    @marshal_with(TemplateSchema(), code=200)
    def post(self):
        """Handle the upload, convert it and stream back the OMEX file."""
        # check if the post request has the file part
        if 'xlsm' not in request.files:
            return jsonify(
                {"response": 'No file part'}
            )
        # NOTE: `file` shadows a builtin name; kept for byte-compatibility.
        file = request.files['xlsm']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            return jsonify(
                {"response": 'No file selected'}
            )
        # NOTE(review): substring test — any extension contained in "xlsm_xlsx"
        # (e.g. "xl") also passes; presumably only xlsm/xlsx intended.
        if file and file.filename.split('.')[-1] in "xlsm_xlsx":
            file.seek(0)
            enzmldoc = self.convertSheet(file)
            # Send File: write the OMEX to a unique temp directory, read it
            # into memory, then remove the directory again.
            dirpath = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "converter_temp"
            )
            os.makedirs(dirpath, exist_ok=True)
            dirpath = os.path.join(
                dirpath,
                next(tempfile._get_candidate_names())
            )
            enzmldoc.toFile(dirpath)
            path = os.path.join(
                dirpath,
                enzmldoc.getName().replace(' ', '_') + '.omex'
            )
            f = io.BytesIO(open(path, "rb").read())
            f.name = enzmldoc.getName() + '.omex'
            shutil.rmtree(dirpath, ignore_errors=True)
            # NOTE(review): `attachment_filename` was renamed to
            # `download_name` in Flask 2.x — confirm against pinned version.
            return send_file(
                f,
                mimetype='omex',
                as_attachment=True,
                attachment_filename='%s.omex' % enzmldoc.getName())
    def convertSheet(self, file):
        """Convert the uploaded workbook into an EnzymeMLDocument."""
        # gather sheets by name
        sheets = self.getSheets(file)
        # General information
        info = self.getGeneralInfo(sheets["generalInfo"])
        # NOTE(review): raises KeyError if the "doc" cell is empty ("#NULL#"),
        # since getGeneralInfo only adds keys for non-null cells.
        doc = info["doc"]
        info.pop("doc")
        enzmldoc = EnzymeMLDocument(**info)
        enzmldoc.setCreated(str(doc))
        # Creators
        self.getCreators(sheets['creator'], enzmldoc)
        # create mapping dictionary (EnzymeML id -> display name),
        # filled by getReactants/getProteins and used by getReactions
        self.proteinDict = dict()
        self.reactantDict = dict()
        # Vessel
        self.getVessels(sheets["vessels"], enzmldoc)
        # Reactants
        self.getReactants(sheets["reactants"], enzmldoc)
        # Proteins
        self.getProteins(sheets["proteins"], enzmldoc)
        # Reactions
        self.getReactions(sheets["reactions"], enzmldoc)
        # Data
        self.getData(sheets["data"], enzmldoc)
        return enzmldoc
    def __cleanSpaces(self, cell):
        """Map a single-space cell to NaN so dropna() can discard it."""
        if cell == " ":
            return np.nan
        else:
            return cell
    def getSheets(self, file):
        """Read every relevant worksheet, keyed by a short logical name."""
        def loadSheet(name, skiprow):
            # Each sheet has a fixed number of header rows to skip.
            return pd.read_excel(
                file,
                sheet_name=name,
                skiprows=skiprow
            )
        return {
            "creator": loadSheet("General Information", 9),
            "generalInfo": loadSheet("General Information", 1),
            "vessels": loadSheet("Vessels", 2),
            "reactants": loadSheet("Reactants", 2),
            "proteins": loadSheet("Proteins", 2),
            "reactions": loadSheet("Reactions", 2),
            "kineticModels": loadSheet("Kinetic Models", 2),
            "data": loadSheet("Data", 3),
        }
    def getCreators(self, sheet, enzmldoc):
        """Parse the creator rows and attach Creator objects to the document."""
        sheet = sheet.iloc[:, 0:3].dropna()
        sheet.columns = ["family_name", "given_name", "mail"]
        data = sheet.to_dict("index")
        creators = [Creator(**user) for user in data.values()]
        enzmldoc.setCreator(creators)
    def getGeneralInfo(self, sheet):
        """Collect name/doc/doi/pubmedID/url cells, skipping empty ones."""
        sheet = sheet.replace(np.nan, '#NULL#', regex=True)
        data = dict()
        def addData(name, val, d):
            # Only store cells that are actually filled in.
            return d.update({name: val}) if val != "#NULL#" else False
        keys = ["name", "doc", "doi", "pubmedID", "url"]
        for i, name in enumerate(keys):
            addData(
                name,
                sheet.iloc[i, 1],
                data
            )
        return data
    def getVessels(self, sheet, enzmldoc):
        """Parse the vessel sheet; only vessel "v0" is used."""
        sheet = sheet.iloc[0:20, 0:4].applymap(self.__cleanSpaces)
        sheet = sheet.dropna(thresh=sheet.shape[-1]-1)
        # Vessel(name, id_, size, unit)
        # rename columns to match kwargs
        sheet.columns = ["id_", "name", "size", "unit"]
        sheet = sheet.set_index("id_")
        data = sheet.to_dict("index")
        enzmldoc.setVessel(
            Vessel(
                id_="v0",
                **data["v0"]
            )
        )
    def getReactants(self, sheet, enzmldoc):
        """Parse reactant rows, add them to the document and record id->name."""
        # Clean sheet
        sheet = sheet.iloc[0:20, 0:6].applymap(self.__cleanSpaces)
        sheet = sheet.dropna(thresh=sheet.shape[-1]-1)
        sheet = sheet.replace(np.nan, '#NULL#', regex=True)
        # Reactant(name, compartment, init_conc=0.0, substanceunits='NAN',
        # constant=False, smiles=None, inchi=None)
        # rename columns to match kwargs
        sheet.columns = [
            "id_",
            "name",
            "compartment",
            "constant",
            "smiles",
            "inchi"
        ]
        sheet = sheet.set_index("id_")
        data = sheet.to_dict("index")
        # Create PyEnzyme object
        # NOTE(review): `val in "Not constant Constant"` is a substring test,
        # and bool("Constant") == bool("Not constant") == True, so both labels
        # map to True here — compare getProteins.boolCheck; likely a bug.
        def boolCheck(val):
            return bool(val) if val in "Not constant Constant" else val
        for id_, item in data.items():
            item = {
                key: boolCheck(val)
                for key, val in item.items()
                if val != '#NULL#'
            }
            item["compartment"] = enzmldoc.getVessel().getId()
            reactant = Reactant(**item)
            reac_id = enzmldoc.addReactant(reactant, custom_id=id_)
            self.reactantDict[reac_id] = reactant.getName()
    def getProteins(self, sheet, enzmldoc):
        """Parse protein rows, add them to the document and record id->name."""
        # Clean sheet
        sheet = sheet.iloc[0:20, 0:8].applymap(self.__cleanSpaces)
        sheet = sheet.dropna(thresh=sheet.shape[-1]-3)
        sheet = sheet.replace(np.nan, '#NULL#', regex=True)
        # Protein(name, sequence, compartment=None, init_conc=None,
        # substanceunits=None, constant=True, ecnumber=None, uniprotid=None,
        # organism=None)
        # rename columns to match kwargs
        sheet.columns = [
            "id_",
            "name",
            "sequence",
            "compartment",
            "constant",
            "organism",
            "ecnumber",
            "uniprotid"
        ]
        sheet = sheet.set_index("id_")
        data = sheet.to_dict("index")
        # Create PyEnzyme object
        # NOTE(review): maps "Not constant" -> True and "Constant" -> False,
        # which looks inverted — confirm against the template semantics.
        def boolCheck(val):
            return True if val == "Not constant" else False \
                if val == "Constant" else val
        for id_, item in data.items():
            item = {
                key: boolCheck(val)
                for key, val in item.items()
                if val != '#NULL#'
            }
            item["compartment"] = enzmldoc.getVessel().getId()
            protein = Protein(**item)
            prot_id = enzmldoc.addProtein(
                protein,
                use_parser=False,
                custom_id=id_
            )
            self.proteinDict[prot_id] = protein.getName()
    # functions to extract elements
    def getReacElements(self, item):
        """Split the comma-separated participant cells out of a reaction row.

        Returns the row dict (with participant keys removed) and a dict of
        educt/product/modifier name lists; proteins are folded into modifiers.
        """
        elements = {
            "educts": item["educts"].split(', '),
            "products": item["products"].split(', '),
            "modifiers": item["modifiers"].split(', ') +
            item["proteins"].split(', ')
        }
        item.pop("educts")
        item.pop("products")
        item.pop("modifiers")
        item.pop("proteins")
        return item, elements
    def getReactions(self, sheet, enzmldoc):
        """Parse reaction rows and wire up their educts/products/modifiers."""
        # Clean sheet
        sheet = sheet.iloc[0:20, 0:10].applymap(self.__cleanSpaces)
        sheet = sheet.dropna(thresh=sheet.shape[-1]-1)
        sheet = sheet.replace(np.nan, '#NULL#', regex=True)
        # EnzymeReaction(self, temperature, tempunit, ph, name, reversible,
        # educts=None, products=None, modifiers=None)
        # rename columns to match kwargs
        sheet.columns = [
            "id_",
            "name",
            "temperature",
            "tempunit",
            "ph",
            "reversible",
            "educts",
            "products",
            "proteins",
            "modifiers"
        ]
        sheet = sheet.set_index("id_")
        data = sheet.to_dict("index")
        # Create PyEnzyme object
        def boolCheck(val):
            return True if val == "reversible" else \
                False if val == "irreversible" else val
        # rearrange unison of protein/reactantDict to guarantee consistency
        # (inverted mapping: display name -> EnzymeML id)
        inv_prDict = {
            name: id_ for id_, name in {
                **self.proteinDict,
                **self.reactantDict}.items()
        }
        for id_, item in data.items():
            item, elements = self.getReacElements(item)
            item = {
                key: boolCheck(val)
                for key, val in item.items()
                if val != '#NULL#'
            }
            reac = EnzymeReaction(**item)
            # add elements via the matching EnzymeReaction method
            elemMap = {
                "educts": reac.addEduct,
                "products": reac.addProduct,
                "modifiers": reac.addModifier
            }
            for key in elements:
                fun = elemMap[key]
                for elem in elements[key]:
                    if elem != "#NULL#":
                        elem_id = inv_prDict[elem]
                        # NOTE(review): substring checks — 's'/'p' anywhere in
                        # the id matches, not just as a prefix.
                        if 's' in elem_id:
                            fun(
                                id_=elem_id,
                                stoichiometry=1.0,
                                constant=True,
                                enzmldoc=enzmldoc
                            )
                        elif 'p' in elem_id:
                            fun(
                                id_=elem_id,
                                stoichiometry=1.0,
                                constant=True,
                                enzmldoc=enzmldoc
                            )
                        else:
                            raise KeyError(
                                f"The identifier {elem_id} could not be parsed. \
                                Make sure the ID is either s for reactants \
                                or p for proteins."
                            )
            enzmldoc.addReaction(reac)
    def getData(self, sheet, enzmldoc):
        """Parse the measurement sheet into Replicates and initial concentrations."""
        # Clean sheet
        sheet = sheet.dropna(how="all")
        sheet = sheet.replace(np.nan, '#NULL#', regex=True)
        # Column layout (0-based): 0 experiment id, 1 data type, 2 reaction,
        # 3-5 protein name/conc/unit, 6-8 reactant name/conc/unit, 9+ values.
        reactions = set(sheet.iloc[:, 2])
        for reac in reactions:
            df_reac = sheet[sheet.iloc[:, 2] == reac]
            # NOTE(review): experiment ids are taken from the whole sheet, not
            # just this reaction's rows — df_exp may come out empty; confirm.
            exp_ids = set(sheet.iloc[:, 0])
            for exp in exp_ids:
                # fetch individual experiments
                df_exp = df_reac[df_reac.iloc[:, 0] == exp]
                exp_dict = dict(reactants=list())
                for index, row in df_exp.iterrows():
                    datType = row.iloc[1]
                    if "Time" in datType:
                        # Time row: unit is embedded in brackets, values from col 9 on.
                        time_raw = [
                            val for val in list(row.iloc[9::])
                            if type(val) == float
                        ]
                        exp_dict["time"] = {
                            "unit": datType.split("[")[-1].split(']')[0],
                            "raw": [
                                val for val in time_raw if type(val) == float
                            ]
                        }
                    elif "Concentration" in datType or "Absorption" in datType:
                        if "Concentration" in datType:
                            data_type = "conc"
                        if "Absorption" in datType:
                            data_type = "abs"
                        data_raw = [
                            val for val in list(row.iloc[9::])
                            if type(val) == float
                        ]
                        # Resolve names to EnzymeML ids via the document.
                        reactant = enzmldoc.getReactant(
                            row.iloc[6],
                            by_id=False
                        ).getId()
                        init_val = row.iloc[7]
                        init_unit = repl_unit = row.iloc[8]
                        protein_id = enzmldoc.getProtein(
                            row.iloc[3],
                            by_id=False
                        ).getId()
                        protein_init_val = row.iloc[4]
                        protein_init_unit = row.iloc[5]
                        enzmldoc.getReactant(reactant).setInitConc(init_val)
                        enzmldoc.getReactant(reactant).setSubstanceUnits(
                            UnitCreator().getUnit(init_unit, enzmldoc)
                        )
                        # Absorption replicates carry the literal unit "abs".
                        if data_type == "abs":
                            repl_unit = "abs"
                        if len(data_raw) > 0:
                            exp_dict["reactants"] += [{
                                "id": reactant,
                                "unit": repl_unit,
                                "init_val": init_val,
                                "raw": data_raw,
                                "type": data_type
                            }]
                            # Add Protein initial concentration to modifier
                            enzmldoc.getReaction(
                                reac, by_id=False
                            ).addInitConc(
                                protein_id,
                                protein_init_val,
                                protein_init_unit,
                                enzmldoc
                            )
                            enzmldoc.getProtein(
                                protein_id
                            ).setInitConc(
                                protein_init_val
                            )
                            enzmldoc.getProtein(
                                protein_id
                            ).setSubstanceUnits(
                                UnitCreator().getUnit(
                                    protein_init_unit, enzmldoc
                                )
                            )
                        else:
                            # Add initial concentration although
                            # no raw data is given
                            enzmldoc.getReaction(
                                reac, by_id=False
                            ).addInitConc(
                                reactant,
                                init_val,
                                init_unit,
                                enzmldoc
                            )
                            # Add Protein initial concentration to modifier
                            enzmldoc.getReaction(
                                reac,
                                by_id=False).addInitConc(
                                protein_id,
                                protein_init_val,
                                protein_init_unit,
                                enzmldoc
                            )
                            enzmldoc.getProtein(
                                protein_id
                            ).setInitConc(
                                protein_init_val
                            )
                            enzmldoc.getProtein(
                                protein_id
                            ).setSubstanceUnits(
                                UnitCreator().getUnit(
                                    protein_init_unit,
                                    enzmldoc)
                            )
                # add replicates to enzmldoc
                # NOTE(review): requires a "Time" row to exist for the
                # experiment, otherwise exp_dict["time"] raises KeyError.
                for i, reactant in enumerate(exp_dict["reactants"]):
                    repl = Replicate(
                        replica=f"repl_{exp}_{i}",
                        reactant=reactant["id"],
                        type_=reactant["type"],
                        data_unit=reactant["unit"],
                        time_unit=exp_dict["time"]["unit"],
                        init_conc=reactant["init_val"],
                        data=reactant["raw"],
                        time=exp_dict["time"]["raw"]
                    )
                    enzmldoc.getReaction(
                        reac,
                        by_id=False
                    ).addReplicate(
                        repl,
                        enzmldoc
                    )
| 31.932143 | 81 | 0.443686 |
af9ac0dd6172439349e499ca7c97b8942892ae3c | 1,009 | py | Python | rally/common/objects/__init__.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 263 | 2015-04-26T16:05:34.000Z | 2022-02-28T11:17:07.000Z | rally/common/objects/__init__.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 19 | 2015-04-23T11:53:10.000Z | 2019-02-20T11:23:09.000Z | rally/common/objects/__init__.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 287 | 2015-04-23T11:28:03.000Z | 2021-09-16T13:05:53.000Z | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains the Rally objects."""
from rally.common.objects.deploy import Deployment # noqa
from rally.common.objects.task import Subtask # noqa
from rally.common.objects.task import Task # noqa
from rally.common.objects.task import Workload # noqa
from rally.common.objects.verification import Verification # noqa
from rally.common.objects.verifier import Verifier # noqa
| 43.869565 | 78 | 0.75223 |
e400025548a71e8814de001af196838bbd528db5 | 1,039 | py | Python | runstat.py | commaai/comma10k | c48d22c608539728d7b8db6e818823b88f69a073 | [
"MIT"
] | 567 | 2020-02-28T20:00:31.000Z | 2022-03-30T16:31:19.000Z | runstat.py | commaai/comma10k | c48d22c608539728d7b8db6e818823b88f69a073 | [
"MIT"
] | 1,404 | 2020-03-03T07:35:08.000Z | 2022-03-28T00:14:40.000Z | runstat.py | commaai/comma10k | c48d22c608539728d7b8db6e818823b88f69a073 | [
"MIT"
] | 718 | 2020-02-28T20:29:36.000Z | 2022-03-19T18:17:01.000Z | #!/usr/bin/env python3
import os
import subprocess
def tx(x):
    """Renumber 'sa#####' mask paths: the 5-digit number is shifted by +5000."""
    if b"/sa" not in x:
        return x
    prefix, tail = x.split(b'/sa')
    renumbered = int(tail[0:5]) + 5000
    return b"%s/%04d%s" % (prefix, renumbered, tail[5:])
# https://stackoverflow.com/questions/5669621/git-find-out-which-files-have-had-the-most-commits
# List every object ever committed together with its commit count.
out = subprocess.check_output("git rev-list --objects --all | awk '$2' | sort -k2 | uniq -cf1 | sort -rn", shell=True).strip().split(b"\n")
fnn = []
al_set = set()
for j in out:
    # Each line is "<count> <sha> <path>"; skip anything else.
    jj = j.strip().split(b" ")
    if len(jj) != 3:
        continue
    cnt, _, fn = jj
    fn = tx(fn)
    cnt = int(cnt)
    # Only existing mask files count; >1 commit means it was edited after
    # the initial auto-generated commit, i.e. actually labelled.
    if os.path.isfile(fn) and fn.startswith(b"masks/"):
        if cnt > 1:
            fnn.append(fn)
        al_set.add(fn)
out = sorted(list(set(fnn)))
missing_count = len(al_set) - len(out)
if missing_count < 20:
    print(f"last {missing_count} mask(s) missing:")
    print(al_set.difference(set(out)))
# Persist the list of labelled masks for training.
with open("files_trainable", "wb") as f:
    f.write(b'\n'.join(out))
print("number labelled %d/%d, percent done: %.2f%%" % (len(out), len(al_set), len(out)/len(al_set)*100.))
| 25.975 | 139 | 0.620789 |
6f358779d9fec79c82c180da4284e30f0efdcda9 | 836 | py | Python | hbmqtt_broker/__init__.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | 3 | 2020-04-27T08:27:31.000Z | 2021-04-26T14:58:50.000Z | hbmqtt_broker/__init__.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | 1 | 2020-06-03T09:40:17.000Z | 2020-06-04T07:47:31.000Z | hbmqtt_broker/__init__.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | 3 | 2020-06-02T08:59:10.000Z | 2021-04-26T14:58:54.000Z | import logging
import asyncio
import threading
from hbmqtt.broker import Broker
from hbmqtt_broker.conf import broker_config
@asyncio.coroutine
def broker_coro(config):
broker = Broker(config)
yield from broker.start()
class MQTTBroker(threading.Thread):
def __init__(self, config=None):
threading.Thread.__init__(self)
self._broker_config = config or broker_config
def run(self):
loop = asyncio.new_event_loop()
loop.run_until_complete(broker_coro(self._broker_config))
loop.run_forever()
if __name__ == '__main__':
formatter = "[%(asctime)s] :: %(levelname)s :: %(name)s :: %(message)s"
logging.basicConfig(level=logging.INFO, format=formatter)
asyncio.get_event_loop().run_until_complete(broker_coro(broker_config))
asyncio.get_event_loop().run_forever()
| 28.827586 | 75 | 0.726077 |
701bfaf627d63b365b5f42a392f467e0a87e10fc | 359 | py | Python | thaniya_server/src/thaniya_server/flask/FlaskFilter_toStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | 1 | 2021-01-20T18:27:22.000Z | 2021-01-20T18:27:22.000Z | thaniya_server/src/thaniya_server/flask/FlaskFilter_toStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | null | null | null | thaniya_server/src/thaniya_server/flask/FlaskFilter_toStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
] | null | null | null |
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# Creates a canonical string representation of data.
#
class FlaskFilter_toStr(AbstractFlaskTemplateFilter):
    """Template filter rendering None as "" and booleans as "true"/"false"."""

    def __call__(self, data):
        # Identity checks keep 1/0 distinct from True/False.
        for sentinel, text in ((None, ""), (True, "true"), (False, "false")):
            if data is sentinel:
                return text
        return str(data)
#
#
| 10.257143 | 68 | 0.710306 |
de1e15b055c5fb7fd5e7278bd01f1daffef57889 | 5,630 | py | Python | RedLionfishDeconv/RLDeconvolve.py | DragaDoncila/RedLionfish | f7661aa85c3f92f799cb3402c237cb6ea9146f10 | [
"Apache-2.0"
] | 4 | 2021-11-20T07:51:40.000Z | 2022-01-28T21:11:50.000Z | RedLionfishDeconv/RLDeconvolve.py | DragaDoncila/RedLionfish | f7661aa85c3f92f799cb3402c237cb6ea9146f10 | [
"Apache-2.0"
] | 3 | 2021-11-20T16:15:55.000Z | 2022-02-01T22:00:44.000Z | RedLionfishDeconv/RLDeconvolve.py | DragaDoncila/RedLionfish | f7661aa85c3f92f799cb3402c237cb6ea9146f10 | [
"Apache-2.0"
] | 1 | 2021-12-21T02:55:06.000Z | 2021-12-21T02:55:06.000Z | '''
Copyright 2021 Rosalind Franklin Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#Entry point for doing RL 3D deconvolution using either CPU or GPU
#TODO: A lot of these parameters can be passed as **kwargs.
#TODO: Test GPU acceleration on MacOS
#TODO: Consider open files using scipy.imread rather than the tifffile modules
#from RedLionfishDeconv import helperfunctions
#from helperfunctions import *
#import helperfunctions
import logging
def doRLDeconvolutionFromNpArrays(data_np , psf_np ,*, niter=10, method='gpu', useBlockAlgorithm=False, callbkTickFunc=None, resAsUint8 = False):
    '''
    Richardson-Lucy deconvolution of 3D data.

    It does NOT use the skimage.image.restoration.rishardson_lucy.
    Iteration uses FFT-based convolutions, either using CPU (scipy) or GPU (Reikna OpenCL).

    Fallback chain: non-block GPU -> block GPU (halving the block size from 512
    down to 128 on failure) -> CPU (scipy).

    Parameters:
        data_np: 3d volume of data as numpy array
        psf_np: point-spread-function to use for deconvolution
        niter: number of iterations to perform
        method: 'gpu' to use Reikna OpenCL , 'cpu' to use Scipy.
        useBlockAlgorithm: 'gpu' only, forces to use block algorithm. Result may show boundary effects.
        callbkTickFunc: function to use to provide tick update during the RL algorithm. This can be either each iteration step or in case of block algorithm, each block calculation
        resAsUint8: Set to return the result of the RL deconvolution as a np.uint8 array. Useful for displaying in napari for example. Note that it will adjust minimum and maximum to ful range 0-255.
            Set to False to return result as default np.float32 format.

    Returns:
        The deconvolved volume, or None if every backend failed.
    '''
    from RedLionfishDeconv import helperfunctions
    logging.info(f"doRLDeconvolutionFromNpArrays(), niter={niter} , method={method} , useBlockAlgorithm={useBlockAlgorithm}, resAsUint8={resAsUint8}")
    resRL = None
    if method=='gpu':
        import RedLionfishDeconv.RLDeconv3DReiknaOCL as rlreikna
        if rlreikna.isReiknaAvailable:
            #Great, use the reikna then
            if not useBlockAlgorithm:
                #Use standard RL OCL version
                try:
                    logging.info("Try using the non-block deconvolution algorithm")
                    resRL = rlreikna.nonBlock_RLDeconvolutionReiknaOCL(data_np, psf_np, niter=niter, callbkTickFunc=callbkTickFunc)
                except Exception as e:
                    #Probably out of memory error, fallback to block algorithm
                    logging.info("nonBlock_RLDeconvolutionReiknaOCL() failed (GPU). Will try next to use block deconvolution.")
                    logging.info(e)
                    useBlockAlgorithm= True
            if useBlockAlgorithm:
                bKeepTrying=True
                blocksize=512
                while bKeepTrying:
                    try:
                        resRL = rlreikna.block_RLDeconv3DReiknaOCL4(data_np , psf_np,niter=niter,max_dim_size=blocksize, callbkTickFunc=callbkTickFunc)
                        bKeepTrying=False
                    except Exception as e:
                        #Error doing previous calculation, reduce block size
                        logging.info(f"Error: block_RLDeconv3DReiknaOCL4 with blocksize={blocksize} failed (GPU). Will try to halve blocksize.")
                        logging.info(e)
                        if blocksize>=128 :
                            blocksize = blocksize//2
                            bKeepTrying=True
                        else:
                            #No point reducing the block size to smaller, fall back to CPU
                            bKeepTrying=False
                            method = 'cpu'
                            logging.info('GPU calculation failed, falling back to CPU.')
        else:
            logging.info("Reikna is not available, falling back to CPU scipy calculation")
            method = 'cpu'
    if method == 'cpu':
        from . import RLDeconv3DScipy as rlcpu
        try:
            resRL = rlcpu.doRLDeconvolution_DL2_4(data_np, psf_np, niter=niter, callbkTickFunc=callbkTickFunc)
        except Exception as e:
            logging.info("doRLDeconvolution_DL2_4 failed (CPU) with error:")
            logging.info(str(e))
    if resAsUint8:
        resRL = helperfunctions.convertToUint8AndFullRange(resRL)
    return resRL
def doRLDeconvolutionFromFiles(datapath, psfpath, niter, savepath=None):
    """Run RL deconvolution on TIFF files on disk.

    Reads the data and PSF volumes from `datapath` and `psfpath`, runs
    `doRLDeconvolutionFromNpArrays` for `niter` iterations (result converted
    to uint8), and optionally writes the result to `savepath` as a TIFF.

    Parameters
    ----------
    datapath : str or Path
        Path to the 3D data TIFF.
    psfpath : str or Path
        Path to the 3D point-spread-function TIFF.
    niter : int
        Number of Richardson-Lucy iterations.
    savepath : str or Path, optional
        If given, the result is saved there as a TIFF.

    Returns
    -------
    numpy.ndarray or None
        The deconvolved volume as uint8, or None if deconvolution failed.
    """
    import tifffile as tf
    import sys
    import numpy as np
    # Validate that both volumes are 3-dimensional before doing any work.
    data_np = np.array(tf.imread(datapath))
    if data_np.ndim != 3:
        logging.info("Data is not 3 dimensional. Exiting.")
        # Bug fix: was `sys.exit` (a bare attribute reference that did
        # nothing), so non-3D data silently fell through to deconvolution.
        sys.exit()
    psf_np = np.array(tf.imread(psfpath))
    if psf_np.ndim != 3:
        logging.info("Psf is not 3-dimensional. Exiting.")
        sys.exit()
    res_np = doRLDeconvolutionFromNpArrays(data_np, psf_np, niter=niter, resAsUint8=True)
    logging.info("res_np collected")
    if (res_np is not None) and (savepath is not None):
        logging.info(f"Saving data to {savepath}")
        tf.imsave(savepath, res_np)
    return res_np
| 43.643411 | 203 | 0.649911 |
d093af07aa060b38370d58195b8b4de05100ea4a | 3,656 | py | Python | kivy/uix/camera.py | ableity/kivy_android_camera_fix | 2a54ea17143111af3e46d46f6756e880c4d91f54 | [
"MIT"
] | null | null | null | kivy/uix/camera.py | ableity/kivy_android_camera_fix | 2a54ea17143111af3e46d46f6756e880c4d91f54 | [
"MIT"
] | null | null | null | kivy/uix/camera.py | ableity/kivy_android_camera_fix | 2a54ea17143111af3e46d46f6756e880c4d91f54 | [
"MIT"
] | null | null | null | '''
Camera
======
The :class:`Camera` widget is used to capture and display video from a camera.
Once the widget is created, the texture inside the widget will be automatically
updated. Our :class:`~kivy.core.camera.CameraBase` implementation is used under
the hood::
cam = Camera()
By default, the first camera found on your system is used. To use a different
camera, set the index property::
cam = Camera(index=1)
You can also select the camera resolution::
cam = Camera(resolution=(320, 240))
.. warning::
The camera texture is not updated as soon as you have created the object.
The camera initialization is asynchronous, so there may be a delay before
the requested texture is created.
'''
__all__ = ('Camera', )
from kivy.uix.image import Image
from kivy.core.camera import Camera as CoreCamera
from kivy.properties import NumericProperty, ListProperty, \
BooleanProperty
class Camera(Image):
    '''Camera class. See module documentation for more information.
    '''

    play = BooleanProperty(True)
    '''Boolean indicating whether the camera is playing or not.
    You can start/stop the camera by setting this property::
        # start the camera playing at creation (default)
        cam = Camera(play=True)
        # create the camera, and start later
        cam = Camera(play=False)
        # and later
        cam.play = True
    :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to
    True.
    '''

    index = NumericProperty(-1)
    '''Index of the used camera, starting from 0.
    :attr:`index` is a :class:`~kivy.properties.NumericProperty` and defaults
    to -1 to allow auto selection.
    '''

    zoom = NumericProperty(0)
    '''Zoom value forwarded to the patched core camera provider
    (edit by ableity).
    :attr:`zoom` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 0.
    '''

    focusmode = NumericProperty(0)
    '''Focus mode forwarded to the patched core camera provider
    (edit by ableity).
    NOTE(review): this property was referenced below (``fbind('focusmode',
    ...)`` and ``self.focusmode``) but never declared, which raised an
    ``AttributeError`` as soon as the widget was created. The default of 0
    mirrors :attr:`zoom`; confirm the value type actually expected by the
    patched :class:`~kivy.core.camera.CameraBase` implementation.
    '''

    resolution = ListProperty([-1, -1])
    '''Preferred resolution to use when invoking the camera. If you are using
    [-1, -1], the resolution will be the default one::
        # create a camera object with the best image available
        cam = Camera()
        # create a camera object with an image of 320x240 if possible
        cam = Camera(resolution=(320, 240))
    .. warning::
        Depending on the implementation, the camera may not respect this
        property.
    :attr:`resolution` is a :class:`~kivy.properties.ListProperty` and defaults
    to [-1, -1].
    '''

    def __init__(self, **kwargs):
        self._camera = None
        super(Camera, self).__init__(**kwargs)
        if self.index == -1:
            self.index = 0
        # Re-create the core camera whenever any of these properties change.
        on_index = self._on_index
        fbind = self.fbind
        fbind('index', on_index)
        fbind('resolution', on_index)
        fbind('zoom', on_index)  # edit by ableity
        fbind('focusmode', on_index)  # edit by ableity
        on_index()

    def on_tex(self, *l):
        # New frame available from the core camera: repaint the widget.
        self.canvas.ask_update()

    def _on_index(self, *largs):
        # Tear down any existing core camera and build a fresh one with the
        # current index/resolution/zoom/focusmode settings.
        self._camera = None
        if self.index < 0:
            return
        if self.resolution[0] < 0 or self.resolution[1] < 0:
            return
        self._camera = CoreCamera(index=self.index,
                                  resolution=self.resolution, stopped=True, zoom=self.zoom, focusmode=self.focusmode)  # edit by ableity
        self._camera.bind(on_load=self._camera_loaded)
        if self.play:
            self._camera.start()
            self._camera.bind(on_texture=self.on_tex)

    def _camera_loaded(self, *largs):
        # Core camera finished initializing: adopt its texture.
        self.texture = self._camera.texture
        self.texture_size = list(self.texture.size)

    def on_play(self, instance, value):
        if not self._camera:
            return
        if value:
            self._camera.start()
        else:
            self._camera.stop()
| 29.967213 | 135 | 0.638676 |
6c0c35a0db658b16b266d03ab38f688faa4007b9 | 1,036 | py | Python | bfc/codegen/base.py | staceb/esotope-bfc | 0f4992cce44f3fbf1046ec1c6c981bc4fad023f7 | [
"MIT"
] | 25 | 2015-08-24T11:40:10.000Z | 2022-03-24T03:11:25.000Z | bfc/codegen/base.py | staceb/esotope-bfc | 0f4992cce44f3fbf1046ec1c6c981bc4fad023f7 | [
"MIT"
] | 30 | 2017-11-22T16:01:15.000Z | 2017-11-22T16:10:27.000Z | bfc/codegen/base.py | staceb/esotope-bfc | 0f4992cce44f3fbf1046ec1c6c981bc4fad023f7 | [
"MIT"
] | 2 | 2018-11-08T15:04:11.000Z | 2021-01-26T17:24:13.000Z | # This is a part of Esotope Brainfuck Compiler.
import bfc.nodes
class BaseGenerator(object):
    """Base class for Esotope code generators.

    Subclasses implement ``generate_<NodeName>`` methods, one per node class
    in ``bfc.nodes``; ``__init__`` collects them into ``self.genmap`` so that
    ``generate`` can dispatch a node instance to the matching handler.
    """

    def __init__(self, compiler):
        self.compiler = compiler
        self.nindents = 0  # current indentation depth of the emitted code
        # Build a node-class -> bound-handler dispatch table from the
        # subclass's generate_* methods.
        self.genmap = {}
        for name in dir(self):
            if not name.startswith('generate_'): continue
            try:
                self.genmap[getattr(bfc.nodes, name[9:])] = getattr(self, name)
            except AttributeError:
                # generate_* method with no matching node class; ignore it.
                pass

    def __del__(self):
        self.flush()

    def __getattr__(self, name):
        # Unknown attributes are delegated to the owning compiler.
        return getattr(self.compiler, name)

    def flush(self):
        # Bug fix: was `raise NotImplemented`, which raises TypeError because
        # NotImplemented is not an exception; subclasses must override this.
        raise NotImplementedError

    def indent(self):
        self.nindents += 1

    def dedent(self):
        self.nindents -= 1

    def _generatenested(self, children):
        """Generate code for child nodes one indentation level deeper."""
        genmap = self.genmap
        self.indent()
        for child in children:
            genmap[type(child)](child)
        self.dedent()

    def generate(self, node):
        """Dispatch *node* to its generate_* handler."""
        self.debugging = self.compiler.debugging
        self.genmap[type(node)](node)
| 23.022222 | 79 | 0.580116 |
b3f3f74c09d4fefdd72645fa263c6dc3d1e290e7 | 714 | py | Python | stor/types/name_puzzle_condition.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 19 | 2021-06-29T20:06:09.000Z | 2022-02-09T04:33:00.000Z | stor/types/name_puzzle_condition.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 8 | 2021-07-04T03:21:51.000Z | 2021-12-27T07:56:09.000Z | stor/types/name_puzzle_condition.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 6 | 2021-10-04T17:15:30.000Z | 2022-03-15T08:40:01.000Z | from dataclasses import dataclass
from typing import Dict, List, Tuple
from stor.types.blockchain_format.sized_bytes import bytes32
from stor.types.condition_with_args import ConditionWithArgs
from stor.types.condition_opcodes import ConditionOpcode
from stor.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class NPC(Streamable):
    """Name / puzzle-hash / conditions triple for a spent coin."""

    coin_name: bytes32
    puzzle_hash: bytes32
    conditions: List[Tuple[ConditionOpcode, List[ConditionWithArgs]]]

    @property
    def condition_dict(self):
        """Group ``conditions`` into a dict keyed by opcode.

        Each opcode must appear at most once in the list (asserted).
        """
        grouped: Dict[ConditionOpcode, List[ConditionWithArgs]] = {}
        for op, condition_list in self.conditions:
            assert op not in grouped
            grouped[op] = condition_list
        return grouped
| 29.75 | 69 | 0.745098 |
76bf4af665530d02683507fcf3348742c4f60f95 | 12,787 | py | Python | clinicaml/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | clinicaml/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | clinicaml/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | # coding: utf8
import clinicaml.pipelines.engine as cpe
class T1VolumeDartel2MNI(cpe.Pipeline):
    """T1VolumeDartel2MNI - Dartel template to MNI.
    Registers native-space tissue probability maps into MNI (Ixi549) space
    using a previously computed group DARTEL template and per-subject flow
    fields, with optional modulation and Gaussian smoothing.
    Returns:
        A clinicaml pipeline object containing the T1VolumeDartel2MNI pipeline.
    """

    def check_pipeline_parameters(self):
        """Check pipeline parameters."""
        from clinicaml.utils.group import check_group_label

        # group_label is the only compulsory parameter; every other key gets
        # a default below (GM/WM/CSF tissues, provider-default voxel size,
        # modulation on, a single 8 mm smoothing kernel).
        if "group_label" not in self.parameters.keys():
            raise KeyError("Missing compulsory group_label key in pipeline parameter.")
        if "tissues" not in self.parameters:
            self.parameters["tissues"] = [1, 2, 3]
        if "voxel_size" not in self.parameters:
            self.parameters["voxel_size"] = None
        if "modulate" not in self.parameters:
            self.parameters["modulate"] = True
        if "smooth" not in self.parameters:
            self.parameters["smooth"] = [8]
        check_group_label(self.parameters["group_label"])

    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file."""

    def get_input_fields(self):
        """Specify the list of possible inputs of this pipeline.
        Returns:
            A list of (string) input fields name.
        """
        return ["native_segmentations", "flowfield_files", "template_file"]

    def get_output_fields(self):
        """Specify the list of possible outputs of this pipeline.
        Returns:
            A list of (string) output fields name.
        """
        return ["normalized_files", "smoothed_normalized_files", "atlas_statistics"]

    def build_input_node(self):
        """Build and connect an input node to the pipeline."""
        import os
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from colorama import Fore
        from clinicaml.utils.exceptions import ClinicaCAPSError, ClinicaException
        from clinicaml.utils.input_files import (
            t1_volume_deformation_to_template,
            t1_volume_final_group_template,
            t1_volume_native_tpm,
        )
        from clinicaml.utils.inputs import clinica_file_reader, clinica_group_reader
        from clinicaml.utils.stream import cprint
        from clinicaml.utils.ux import (
            print_groups_in_caps_directory,
            print_images_to_process,
        )
        # Check that group already exists
        if not os.path.exists(
            os.path.join(
                self.caps_directory, "groups", "group-" + self.parameters["group_label"]
            )
        ):
            print_groups_in_caps_directory(self.caps_directory)
            raise ClinicaException(
                f"{Fore.RED}Group {self.parameters['group_label']} does not exist. "
                f"Did you run t1-volume or t1-volume-create-dartel pipeline?{Fore.RESET}"
            )
        # Reading errors are accumulated so the user sees every missing file
        # in a single error message at the end.
        all_errors = []
        read_input_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=self.get_input_fields(), mandatory_inputs=True
            ),
        )
        # Segmented Tissues
        # =================
        tissues_input = []
        for tissue_number in self.parameters["tissues"]:
            try:
                native_space_tpm = clinica_file_reader(
                    self.subjects,
                    self.sessions,
                    self.caps_directory,
                    t1_volume_native_tpm(tissue_number),
                )
                tissues_input.append(native_space_tpm)
            except ClinicaException as e:
                all_errors.append(e)
        # Tissues_input has a length of len(self.parameters['mask_tissues']). Each of these elements has a size of
        # len(self.subjects). We want the opposite : a list of size len(self.subjects) whose elements have a size of
        # len(self.parameters['mask_tissues']. The trick is to iter on elements with zip(*my_list)
        tissues_input_rearranged = []
        for subject_tissue_list in zip(*tissues_input):
            tissues_input_rearranged.append(subject_tissue_list)
        read_input_node.inputs.native_segmentations = tissues_input_rearranged
        # Flow Fields
        # ===========
        # One DARTEL flow field per subject/session, mapping native space to
        # the group template.
        try:
            read_input_node.inputs.flowfield_files = clinica_file_reader(
                self.subjects,
                self.sessions,
                self.caps_directory,
                t1_volume_deformation_to_template(self.parameters["group_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)
        # Dartel Template
        # ================
        # Single group-level final template shared by all subjects.
        try:
            read_input_node.inputs.template_file = clinica_group_reader(
                self.caps_directory,
                t1_volume_final_group_template(self.parameters["group_label"]),
            )
        except ClinicaException as e:
            all_errors.append(e)
        if len(all_errors) > 0:
            error_message = "Clinica faced error(s) while trying to read files in your CAPS/BIDS directories.\n"
            for msg in all_errors:
                error_message += str(msg)
            raise ClinicaCAPSError(error_message)
        if len(self.subjects):
            print_images_to_process(self.subjects, self.sessions)
            cprint("The pipeline will last a few minutes per image.")
        # fmt: off
        self.connect(
            [
                (read_input_node, self.input_node, [("native_segmentations", "native_segmentations"),
                                                    ("flowfield_files", "flowfield_files"),
                                                    ("template_file", "template_file")]),
            ]
        )
        # fmt: on

    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        import nipype.interfaces.io as nio
        import nipype.pipeline.engine as npe

        from clinicaml.utils.filemanip import zip_nii

        # Writing normalized images (and smoothed) into CAPS
        # ==================================================
        write_normalized_node = npe.MapNode(
            name="write_normalized_node",
            iterfield=["container", "normalized_files", "smoothed_normalized_files"],
            interface=nio.DataSink(
                infields=["normalized_files", "smoothed_normalized_files"]
            ),
        )
        write_normalized_node.inputs.base_directory = self.caps_directory
        write_normalized_node.inputs.parameterization = False
        # One CAPS output folder per subject/session.
        write_normalized_node.inputs.container = [
            "subjects/"
            + self.subjects[i]
            + "/"
            + self.sessions[i]
            + "/t1/spm/dartel/group-"
            + self.parameters["group_label"]
            for i in range(len(self.subjects))
        ]
        # Rename SPM's c1..c6 / w / mw prefixed outputs into BIDS-style
        # entity names (segm-*, space-Ixi549Space, modulated-on/off, fwhm-*).
        write_normalized_node.inputs.regexp_substitutions = [
            (r"(.*)c1(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-graymatter_probability\3"),
            (r"(.*)c2(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-whitematter_probability\3"),
            (r"(.*)c3(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-csf_probability\3"),
            (r"(.*)c4(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-bone_probability\3"),
            (r"(.*)c5(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-softtissue_probability\3"),
            (r"(.*)c6(sub-.*)(\.nii(\.gz)?)$", r"\1\2_segm-background_probability\3"),
            (
                r"(.*)mw(sub-.*)_probability(\.nii(\.gz)?)$",
                r"\1\2_space-Ixi549Space_modulated-on_probability\3",
            ),
            (
                r"(.*)w(sub-.*)_probability(\.nii(\.gz)?)$",
                r"\1\2_space-Ixi549Space_modulated-off_probability\3",
            ),
            (r"(.*)/normalized_files/(sub-.*)$", r"\1/\2"),
            (
                r"(.*)/smoothed_normalized_files/(fwhm-[0-9]+mm)_(sub-.*)_probability(\.nii(\.gz)?)$",
                r"\1/\3_\2_probability\4",
            ),
            (r"trait_added", r""),
        ]
        # fmt: off
        self.connect(
            [
                (self.output_node, write_normalized_node,
                    [
                        (("normalized_files", zip_nii, True), "normalized_files"),
                        (("smoothed_normalized_files", zip_nii, True), "smoothed_normalized_files"),
                    ],
                )
            ]
        )
        # fmt: on

    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        import nipype.interfaces.spm as spm
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe

        from clinicaml.utils.filemanip import unzip_nii
        from clinicaml.utils.spm import spm_standalone_is_available, use_spm_standalone

        from ..t1_volume_dartel2mni import (
            t1_volume_dartel2mni_utils as dartel2mni_utils,
        )

        if spm_standalone_is_available():
            use_spm_standalone()
        # Unzipping
        # =========
        # SPM cannot read .nii.gz, so inputs are unzipped first.
        unzip_tissues_node = npe.MapNode(
            nutil.Function(
                input_names=["in_file"], output_names=["out_file"], function=unzip_nii
            ),
            name="unzip_tissues_node",
            iterfield=["in_file"],
        )
        unzip_flowfields_node = npe.MapNode(
            nutil.Function(
                input_names=["in_file"], output_names=["out_file"], function=unzip_nii
            ),
            name="unzip_flowfields_node",
            iterfield=["in_file"],
        )
        unzip_template_node = npe.Node(
            nutil.Function(
                input_names=["in_file"], output_names=["out_file"], function=unzip_nii
            ),
            name="unzip_template_node",
        )
        # DARTEL2MNI Registration
        # =======================
        dartel2mni_node = npe.MapNode(
            spm.DARTELNorm2MNI(),
            name="dartel2MNI",
            iterfield=["apply_to_files", "flowfield_files"],
        )
        if self.parameters["voxel_size"] is not None:
            dartel2mni_node.inputs.voxel_size = tuple(self.parameters["voxel_size"])
        dartel2mni_node.inputs.modulate = self.parameters["modulate"]
        # Smoothing is done by a dedicated node below, not by DARTELNorm2MNI.
        dartel2mni_node.inputs.fwhm = 0
        # Smoothing
        # =========
        if self.parameters["smooth"] is not None and len(self.parameters["smooth"]) > 0:
            smoothing_node = npe.MapNode(
                spm.Smooth(), name="smoothing_node", iterfield=["in_files"]
            )
            # Iterate synchronously over isotropic FWHM kernels and their
            # matching output prefixes (one pass per requested kernel).
            smoothing_node.iterables = [
                ("fwhm", [[x, x, x] for x in self.parameters["smooth"]]),
                (
                    "out_prefix",
                    ["fwhm-" + str(x) + "mm_" for x in self.parameters["smooth"]],
                ),
            ]
            smoothing_node.synchronize = True
            # Gather the per-kernel outputs back into a single flat list.
            join_smoothing_node = npe.JoinNode(
                interface=nutil.Function(
                    input_names=["smoothed_normalized_files"],
                    output_names=["smoothed_normalized_files"],
                    function=dartel2mni_utils.join_smoothed_files,
                ),
                joinsource="smoothing_node",
                joinfield="smoothed_normalized_files",
                name="join_smoothing_node",
            )
            # fmt: off
            self.connect(
                [
                    (dartel2mni_node, smoothing_node, [("normalized_files", "in_files")]),
                    (smoothing_node, join_smoothing_node, [("smoothed_files", "smoothed_normalized_files")]),
                    (join_smoothing_node, self.output_node, [("smoothed_normalized_files", "smoothed_normalized_files")]),
                ]
            )
            # fmt: on
        else:
            self.output_node.inputs.smoothed_normalized_files = []
        # Connection
        # ==========
        # fmt: off
        self.connect(
            [
                (self.input_node, unzip_tissues_node, [("native_segmentations", "in_file")]),
                (self.input_node, unzip_flowfields_node, [("flowfield_files", "in_file")]),
                (self.input_node, unzip_template_node, [("template_file", "in_file")]),
                (unzip_tissues_node, dartel2mni_node, [("out_file", "apply_to_files")]),
                (unzip_flowfields_node, dartel2mni_node, [
                    (("out_file", dartel2mni_utils.prepare_flowfields, self.parameters["tissues"]), "flowfield_files")]),
                (unzip_template_node, dartel2mni_node, [("out_file", "template_file")]),
                (dartel2mni_node, self.output_node, [("normalized_files", "normalized_files")]),
            ]
        )
        # fmt: on
| 39.466049 | 122 | 0.561117 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.